diff --git a/LICENSES/vendor/github.com/Azure/go-ntlmssp/LICENSE b/LICENSES/vendor/github.com/Azure/go-ntlmssp/LICENSE new file mode 100644 index 0000000000000..5a2939deb7f46 --- /dev/null +++ b/LICENSES/vendor/github.com/Azure/go-ntlmssp/LICENSE @@ -0,0 +1,25 @@ += vendor/github.com/Azure/go-ntlmssp licensed under: = + +The MIT License (MIT) + +Copyright (c) 2016 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/Azure/go-ntlmssp/LICENSE 7f7cc56311d298677f304d0ffce374d8 diff --git a/LICENSES/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE b/LICENSES/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE new file mode 100644 index 0000000000000..055d875bb132a --- /dev/null +++ b/LICENSES/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE @@ -0,0 +1,26 @@ += vendor/github.com/go-asn1-ber/asn1-ber licensed under: = + +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-asn1-ber Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ += vendor/github.com/go-asn1-ber/asn1-ber/LICENSE db7dfd3c609df968396fa379c3851eb5 diff --git a/LICENSES/vendor/github.com/go-ldap/ldap/v3/LICENSE b/LICENSES/vendor/github.com/go-ldap/ldap/v3/LICENSE new file mode 100644 index 0000000000000..fca294a60931e --- /dev/null +++ b/LICENSES/vendor/github.com/go-ldap/ldap/v3/LICENSE @@ -0,0 +1,26 @@ += vendor/github.com/go-ldap/ldap/v3 licensed under: = + +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/go-ldap/ldap/v3/LICENSE c3fcb38ec828f70d87d00a1c64cd9c2b diff --git a/LICENSES/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE b/LICENSES/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE new file mode 100644 index 0000000000000..f67fc367d04a2 --- /dev/null +++ b/LICENSES/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE @@ -0,0 +1,205 @@ += vendor/github.com/openshift-eng/openshift-tests-extension licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE 86d3f3a95c324c9479bd8986968f4327 diff --git a/LICENSES/vendor/github.com/openshift/api/LICENSE b/LICENSES/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 0000000000000..6ab709a3c9d6c --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,195 @@ += vendor/github.com/openshift/api licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift/api/LICENSE 7ec75f465e2f8bee9a597f4b9d2921ba diff --git a/LICENSES/vendor/github.com/openshift/apiserver-library-go/LICENSE b/LICENSES/vendor/github.com/openshift/apiserver-library-go/LICENSE new file mode 100644 index 0000000000000..8c1731abc5e34 --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/apiserver-library-go/LICENSE @@ -0,0 +1,205 @@ += vendor/github.com/openshift/apiserver-library-go licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/openshift/apiserver-library-go/LICENSE 86d3f3a95c324c9479bd8986968f4327 diff --git a/LICENSES/vendor/github.com/openshift/client-go/LICENSE b/LICENSES/vendor/github.com/openshift/client-go/LICENSE new file mode 100644 index 0000000000000..817bdf23d1ad9 --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/client-go/LICENSE @@ -0,0 +1,195 @@ += vendor/github.com/openshift/client-go licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift/client-go/LICENSE 27bdb051f43ea9035ac160542145b43e diff --git a/LICENSES/vendor/github.com/openshift/library-go/LICENSE b/LICENSES/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 0000000000000..9d7564fc4c5ea --- /dev/null +++ b/LICENSES/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,205 @@ += vendor/github.com/openshift/library-go licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/openshift/library-go/LICENSE 86d3f3a95c324c9479bd8986968f4327 diff --git a/LICENSES/vendor/go.uber.org/atomic/LICENSE b/LICENSES/vendor/go.uber.org/atomic/LICENSE new file mode 100644 index 0000000000000..d7259a2862c0f --- /dev/null +++ b/LICENSES/vendor/go.uber.org/atomic/LICENSE @@ -0,0 +1,23 @@ += vendor/go.uber.org/atomic licensed under: = + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/atomic/LICENSE.txt 1caee86519456feda989f8a838102b50 diff --git a/LICENSES/vendor/gopkg.in/yaml.v2/LICENSE b/LICENSES/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000000000..88be4ca082d37 --- /dev/null +++ b/LICENSES/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,205 @@ += vendor/gopkg.in/yaml.v2 licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/gopkg.in/yaml.v2/LICENSE e3fc50a88d0a364313df4b21ef20c29e diff --git a/go.mod b/go.mod index 5c511d5740334..75e6adef4f109 100644 --- a/go.mod +++ b/go.mod @@ -48,10 +48,15 @@ require ( github.com/moby/sys/userns v0.1.0 github.com/mrunalp/fileutils v0.5.1 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 - github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.35.1 github.com/opencontainers/runc v1.2.1 github.com/opencontainers/selinux v1.11.1 + github.com/openshift-eng/openshift-tests-extension v0.0.0-20241121212100-2e43ae5f86e2 + github.com/openshift/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/apiserver-library-go v0.0.0-20241212055705-41777f979e50 + github.com/openshift/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/prometheus/client_golang v1.19.1 @@ -83,23 +88,25 @@ require ( golang.org/x/term v0.25.0 golang.org/x/time v0.7.0 golang.org/x/tools v0.26.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 - google.golang.org/grpc v1.65.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 + google.golang.org/grpc v1.67.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/square/go-jose.v2 v2.6.0 - k8s.io/api v0.0.0 - k8s.io/apiextensions-apiserver v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.32.0 + k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 k8s.io/cli-runtime v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/client-go v0.32.0 k8s.io/cloud-provider v0.0.0 k8s.io/cluster-bootstrap v0.0.0 - k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 - k8s.io/component-helpers v0.0.0 - k8s.io/controller-manager v0.0.0 + k8s.io/code-generator v0.32.0 + k8s.io/component-base v0.32.0 + k8s.io/component-helpers v0.32.0-rc.1 + k8s.io/controller-manager v0.32.0-rc.1 k8s.io/cri-api v0.0.0 k8s.io/cri-client v0.0.0 k8s.io/csi-translation-lib v0.0.0 @@ -107,8 +114,8 @@ require ( k8s.io/endpointslice v0.0.0 k8s.io/externaljwt v0.0.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kms v0.0.0 - k8s.io/kube-aggregator v0.0.0 + k8s.io/kms v0.32.0 + k8s.io/kube-aggregator v0.32.0 k8s.io/kube-controller-manager v0.0.0 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/kube-proxy v0.0.0 @@ -129,6 +136,7 @@ require ( require ( cel.dev/expr v0.18.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect @@ -150,7 +158,9 @@ require ( github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect github.com/go-errors/errors v1.4.2 // indirect + github.com/go-ldap/ldap/v3 v3.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -205,14 +215,14 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/text v0.19.0 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect @@ -223,6 +233,11 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ./staging/src/k8s.io/api k8s.io/apiextensions-apiserver => ./staging/src/k8s.io/apiextensions-apiserver k8s.io/apimachinery => ./staging/src/k8s.io/apimachinery diff --git a/go.sum b/go.sum index 21e2dbed8cbbf..be9e43a5f30c6 100644 --- a/go.sum +++ b/go.sum @@ -30,7 +30,7 @@ cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhV cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= @@ -123,6 +123,8 @@ cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzS cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e h1:ZU22z/2YRFLyf/P4ZwUYSdNCWsMEI0VeyrFoI2rAhJQ= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod 
h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= @@ -134,9 +136,11 @@ github.com/Microsoft/hnslib v0.0.8 h1:EBrIiRB7i/UYIXEC2yw22dn+RLzOmsc5S0bw2xf0Qu github.com/Microsoft/hnslib v0.0.8/go.mod h1:EYveQJlhKh2obmEIRB3uKN6dBd9pj1frPsrTGFppKuk= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= @@ -175,7 +179,7 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/container-storage-interface/spec v1.9.0 h1:zKtX4STsq31Knz3gciCYCi1SXtO2HJDecIjDVboYavY= @@ -212,14 +216,25 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e h1:VtQff4aOjCdx31u6zrt9hPzFx2Ullu1yep4x8bqrRqg= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 h1:Orim/dwZOmFyeYfuqwaXc5ZA/S29Yx95wJenbxECpI4= +github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50/go.mod h1:w4YCdvmWwxudrJnyPg5Sh3aXNXunCdvTZlYQk9m9H6U= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 h1:PC6mGKxev1xAQV4YniBkEzGXkK+faFjw/N+RNbun25Y= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -227,27 +242,34 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3 h1:JCKUtJPIcyOuG7ctGabLKMgIlKnGumD/iGjuWeEruDI= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -276,7 +298,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -284,6 +306,12 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cadvisor v0.51.0 h1:BspqSPdZoLKrnvuZNOvM/KiJ/A+RdixwagN20n+2H8k= @@ -308,6 +336,7 @@ 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -321,7 +350,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 h1:i2fYnDurfLlJH8AyyMOnkLHnHeP8Ff/DDpuZA/D3bPo= @@ -390,8 +421,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -404,17 +433,25 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift-eng/openshift-tests-extension v0.0.0-20241121212100-2e43ae5f86e2 h1:3vmVPLYkx16VCiWgoaTa0I0T9K1uqrjk2hPbma/dcIw= +github.com/openshift-eng/openshift-tests-extension v0.0.0-20241121212100-2e43ae5f86e2/go.mod h1:1OhaNsaU9vuy/dlYZLEve7bgE2Ed+yTV5VSbYvGXt4s= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 
v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -424,6 +461,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -521,6 +559,7 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -533,6 +572,7 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -556,6 +596,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= @@ -581,6 +622,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -621,18 +663,18 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod 
h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -648,6 +690,7 @@ gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -671,6 +714,7 @@ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/knftables v0.0.17 h1:wGchTyRF/iGTIjd+vRaR1m676HM7jB8soFtyr/148ic= sigs.k8s.io/knftables v0.0.17/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= sigs.k8s.io/kustomize/cmd/config v0.15.0/go.mod h1:Jq57b0nPaoYUlOqg//0JtAh6iibboqMcfbtCYoWPM00= diff --git a/go.work.sum b/go.work.sum index 78665c9725dca..3872a734689a6 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,3 +1,4 @@ +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go/accessapproval v1.7.4 h1:ZvLvJ952zK8pFHINjpMBY5k7LTAp/6pBf50RDMRgBUI= cloud.google.com/go/accesscontextmanager v1.8.4 h1:Yo4g2XrBETBCqyWIibN3NHNPQKUfQqti0lI+70rubeE= @@ -24,7 +25,7 @@ cloud.google.com/go/cloudbuild v1.15.0 h1:9IHfEMWdCklJ1cwouoiQrnxmP0q3pH7JUt8Hqx cloud.google.com/go/clouddms v1.7.3 h1:xe/wJKz55VO1+L891a1EG9lVUgfHr9Ju/I3xh1nwF84= cloud.google.com/go/cloudtasks v1.12.4 h1:5xXuFfAjg0Z5Wb81j2GAbB3e0bwroCeSF+5jBn/L650= cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= 
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/contactcenterinsights v1.12.1 h1:EiGBeejtDDtr3JXt9W7xlhXyZ+REB5k2tBgVPVtmNb0= cloud.google.com/go/container v1.29.0 h1:jIltU529R2zBFvP8rhiG1mgeTcnT27KhU0H/1d6SQRg= cloud.google.com/go/containeranalysis v0.11.3 h1:5rhYLX+3a01drpREqBZVXR9YmWH45RnML++8NsCtuD8= @@ -116,9 +117,11 @@ cloud.google.com/go/webrisk v1.9.4 h1:iceR3k0BCRZgf2D/NiKviVMFfuNC9LmeNLtxUFRB/w cloud.google.com/go/websecurityscanner v1.6.4 h1:5Gp7h5j7jywxLUp6NTpjNPkgZb3ngl0tUSw6ICWvtJQ= cloud.google.com/go/workflows v1.12.3 h1:qocsqETmLAl34mSa01hKZjcqAvt699gaoFbooGGMvaM= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2 h1:x8Brv0YNEe6jY3V/hQglIG2nd8g5E2Zj5ubGKkPQctQ= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/aws/aws-sdk-go-v2 v1.30.1 h1:4y/5Dvfrhd1MxRDD77SrfsDaj8kUkkljU7XE83NPV+o= github.com/aws/aws-sdk-go-v2/config v1.27.24 h1:NM9XicZ5o1CBU/MZaHwFtimRpWx9ohAUAqkG6AqSqPo= github.com/aws/aws-sdk-go-v2/credentials v1.17.24 h1:YclAsrnb1/GTQNt2nzv+756Iw4mF8AOzcDfweWwwm/M= @@ -134,24 +137,48 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.30.1 h1:+woJ607dllHJQtsnJLi52ycuqHMw github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca h1:yGaIDzPWkgU+yRvI2x/rGdOU1hl6bLZzm0mETEUSHwk= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= 
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb h1:NcVXNHJrvrcAv8SVYKzKT2zwtEXU1DK2J+azsK7oz2A= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= @@ -159,26 +186,50 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4 h1:sIXJOMrYnQZJu7OB7ANSF4MYri2fTEGIsRLz6LwI4xE= +github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod 
h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
 github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
 github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
+github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 h1:F0zE2bmdVvaEd18VXuGYQdJJ1FYJu4MIDW9PYZWc9No=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
+github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE=
+github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
 github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
+github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
 github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
 github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
 go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
+sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek=
 sigs.k8s.io/kustomize/cmd/config v0.15.0 h1:WkdY8V2+8J+W00YbImXa2ke9oegfrHH79e+kywW7EdU=
diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod
index 31bc21defa6a6..44e165c14c0e5 100644
--- a/staging/src/k8s.io/api/go.mod
+++ b/staging/src/k8s.io/api/go.mod
@@ -10,7 +10,7 @@ godebug winsymlink=0
 require (
 	github.com/gogo/protobuf v1.3.2
-	k8s.io/apimachinery v0.0.0
+	k8s.io/apimachinery v0.32.0
 )
@@ -39,4 +39,7 @@ require (
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )

-replace k8s.io/apimachinery => ../apimachinery
+replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+	k8s.io/apimachinery => ../apimachinery
+)
diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum
index c868125667cef..481e483765278 100644
--- a/staging/src/k8s.io/api/go.sum
+++ b/staging/src/k8s.io/api/go.sum
@@ -45,8 +45,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod
index b970ec9f4ea3c..25b1d84e35889 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/go.mod
+++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod
@@ -17,6 +17,7 @@ require (
 	github.com/google/go-cmp v0.6.0
 	github.com/google/gofuzz v1.2.0
 	github.com/google/uuid v1.6.0
+	github.com/openshift/api v0.0.0-20241212053709-6b333900129e
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
@@ -27,12 +28,12 @@
 	google.golang.org/grpc v1.65.0
 	google.golang.org/protobuf v1.35.1
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/apiserver v0.0.0
-	k8s.io/client-go v0.0.0
-	k8s.io/code-generator v0.0.0
-	k8s.io/component-base v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/apiserver v0.32.0
+	k8s.io/client-go v0.32.0
+	k8s.io/code-generator v0.32.0
+	k8s.io/component-base v0.32.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
@@ -79,6 +80,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
@@ -104,6 +106,7 @@
 	go.opentelemetry.io/otel/metric v1.28.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.28.0 // indirect
@@ -124,11 +127,15 @@
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
-	k8s.io/kms v0.0.0 // indirect
+	k8s.io/kms v0.32.0 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+	github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e
+	github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385
+	github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
@@ -136,4 +143,5 @@ replace (
 	k8s.io/code-generator => ../code-generator
 	k8s.io/component-base => ../component-base
 	k8s.io/kms => ../kms
+	k8s.io/kube-aggregator => ../kube-aggregator
 )
diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum
index d142701e0b9dd..cb7e8161b7e21 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/go.sum
+++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum
@@ -120,9 +120,11 @@ cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsd
 cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o=
 cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -159,6 +161,14 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/dusk125/api v0.0.0-20241212053709-6b333900129e h1:VtQff4aOjCdx31u6zrt9hPzFx2Ullu1yep4x8bqrRqg=
+github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c=
+github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s=
+github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78=
+github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
@@ -169,15 +179,20 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
 github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -212,6 +227,12 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
+github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
+github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY=
+github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
+github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=
+github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
@@ -231,6 +252,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAx
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -243,6 +265,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
@@ -280,19 +304,26 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA=
 github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
 github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -302,6 +333,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G
 github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
@@ -378,6 +410,7 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -519,6 +552,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod
index b55f733ec0d30..6d9b3e3cabf22 100644
--- a/staging/src/k8s.io/apimachinery/go.mod
+++ b/staging/src/k8s.io/apimachinery/go.mod
@@ -20,7 +20,7 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/moby/spdystream v0.5.0
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
-	github.com/onsi/ginkgo/v2 v2.21.0
+	github.com/onsi/ginkgo/v2 v2.20.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
 	golang.org/x/net v0.30.0
@@ -57,3 +57,5 @@ require (
 	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
+
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum
index 5bc9679b3c2ba..5f4a1a13e0494 100644
--- a/staging/src/k8s.io/apimachinery/go.sum
+++ b/staging/src/k8s.io/apimachinery/go.sum
@@ -65,10 +65,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod
index c3a4062671c9e..3ee10a2d5b5e5 100644
--- a/staging/src/k8s.io/apiserver/go.mod
+++ b/staging/src/k8s.io/apiserver/go.mod
@@ -27,6 +27,7 @@ require (
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
+	github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
 	go.etcd.io/etcd/api/v3 v3.5.16
@@ -40,6 +41,7 @@
 	go.opentelemetry.io/otel/metric v1.28.0
 	go.opentelemetry.io/otel/sdk v1.28.0
 	go.opentelemetry.io/otel/trace v1.28.0
+	go.uber.org/atomic v1.7.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/crypto v0.28.0
 	golang.org/x/net v0.30.0
@@ -52,12 +54,12 @@
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	gopkg.in/square/go-jose.v2 v2.6.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/client-go v0.0.0
-	k8s.io/component-base v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/client-go v0.32.0
+	k8s.io/component-base v0.32.0
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kms v0.0.0
+	k8s.io/kms v0.32.0
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0
@@ -127,9 +129,15 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+	github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e
+	github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385
+	github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9
 	k8s.io/api => ../api
+	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
 	k8s.io/component-base => ../component-base
 	k8s.io/kms => ../kms
+	k8s.io/kube-aggregator => ../kube-aggregator
 )
diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum
index 11a3e430419b1..16117149a5246 100644
--- a/staging/src/k8s.io/apiserver/go.sum
+++ b/staging/src/k8s.io/apiserver/go.sum
@@ -120,9 +120,11 @@ cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsd
 cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o=
 cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -160,6 +162,13 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c=
+github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s=
+github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78=
+github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
@@ -170,15 +179,20 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
 github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -213,6 +227,12 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
+github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
+github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY=
+github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
+github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=
+github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
@@ -232,6 +252,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAx
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -244,6 +265,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
@@ -281,20 +304,27 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA=
 github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
 github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -304,6 +334,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G
 github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
@@ -380,6 +411,7 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -519,6 +551,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod
index ae376cadc89ee..3a7ae72092ea6 100644
--- a/staging/src/k8s.io/cli-runtime/go.mod
+++ b/staging/src/k8s.io/cli-runtime/go.mod
@@ -20,9 +20,9 @@ require (
 	golang.org/x/sync v0.8.0
 	golang.org/x/text v0.19.0
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/client-go v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/client-go v0.32.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
@@ -74,6 +74,7 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum
index fdd3895bceb06..bb7f2aa389f5a 100644
--- a/staging/src/k8s.io/cli-runtime/go.sum
+++ b/staging/src/k8s.io/cli-runtime/go.sum
@@ -87,10 +87,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod
index 50f736561a611..82baff7249105 100644
--- a/staging/src/k8s.io/client-go/go.mod
+++ b/staging/src/k8s.io/client-go/go.mod
@@ -28,8 +28,8 @@ require (
 	golang.org/x/time v0.7.0
 	google.golang.org/protobuf v1.35.1
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
@@ -56,7 +56,7 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/onsi/ginkgo/v2 v2.21.0 // indirect
+	github.com/onsi/ginkgo/v2 v2.20.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -68,6 +68,7 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 )
diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum
index 378e80b311a10..c27200bfb870a 100644
--- a/staging/src/k8s.io/client-go/go.sum
+++ b/staging/src/k8s.io/client-go/go.sum
@@ -74,10 +74,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod
index a67e8c684893d..faecf8fc9535c 100644
--- a/staging/src/k8s.io/cloud-provider/go.mod
+++ b/staging/src/k8s.io/cloud-provider/go.mod
@@ -13,13 +13,13 @@ require (
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/apiserver v0.0.0
-	k8s.io/client-go v0.0.0
-	k8s.io/component-base v0.0.0
-	k8s.io/component-helpers v0.0.0
-	k8s.io/controller-manager v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/apiserver v0.32.0
+	k8s.io/client-go v0.32.0
+	k8s.io/component-base v0.32.0
+	k8s.io/component-helpers v0.32.0-rc.1
+	k8s.io/controller-manager v0.32.0-rc.1
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
@@ -64,6 +64,7 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
@@ -84,6 +85,7 @@
 	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.28.0 // indirect
@@ -103,7 +105,7 @@
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/kms v0.0.0 // indirect
+	k8s.io/kms v0.32.0 // indirect
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
@@ -112,7 +114,12 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+	github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e
+	github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385
+	github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9
 	k8s.io/api => ../api
+	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
 	k8s.io/client-go => ../client-go
@@ -120,4 +127,5 @@ replace (
 	k8s.io/component-helpers => ../component-helpers
 	k8s.io/controller-manager => ../controller-manager
 	k8s.io/kms => ../kms
+	k8s.io/kube-aggregator => ../kube-aggregator
 )
diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum
index 018b07378daeb..f601046ebb519 100644
--- a/staging/src/k8s.io/cloud-provider/go.sum
+++ b/staging/src/k8s.io/cloud-provider/go.sum
@@ -3,8 +3,10 @@ cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
 cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -36,19 +38,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c=
+github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s=
+github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78=
+github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
@@ -75,6 +89,12 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
 github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
+github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
+github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY=
+github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
+github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=
+github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
@@ -91,6 +111,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY
 github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -102,6 +123,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
@@ -136,17 +159,24 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA=
 github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
 github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
@@ -155,6 +185,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G
 github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
@@ -222,6 +253,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+
 go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -312,6 +344,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod
index 0bd2020cacbe3..aeddc89f02479 100644
--- a/staging/src/k8s.io/cluster-bootstrap/go.mod
+++ b/staging/src/k8s.io/cluster-bootstrap/go.mod
@@ -11,8 +11,8 @@ godebug winsymlink=0
 require (
 	github.com/stretchr/testify v1.9.0
 	gopkg.in/square/go-jose.v2 v2.6.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
 	k8s.io/klog/v2 v2.130.1
 )
@@ -40,6 +40,7 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 )
diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum
index 3cb8bab46e34a..e97475107f050 100644
--- a/staging/src/k8s.io/cluster-bootstrap/go.sum
+++ b/staging/src/k8s.io/cluster-bootstrap/go.sum
@@ -41,8 +41,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
diff --git a/staging/src/k8s.io/code-generator/examples/go.mod b/staging/src/k8s.io/code-generator/examples/go.mod
index bd5004269a8dd..519322ec3a274 100644
--- a/staging/src/k8s.io/code-generator/examples/go.mod
+++ b/staging/src/k8s.io/code-generator/examples/go.mod
@@ -7,8 +7,8 @@ go 1.23.0
 godebug default=go1.23
 require (
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
 	k8s.io/client-go v0.0.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
@@ -57,3 +57,13 @@ replace (
 	k8s.io/apimachinery => ../../apimachinery
 	k8s.io/client-go => ../../client-go
 )
+
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+
+replace github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e
+
+replace github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385
+
+replace github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9
+
+replace github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50
diff --git a/staging/src/k8s.io/code-generator/examples/go.sum b/staging/src/k8s.io/code-generator/examples/go.sum
index 1b79bdfce540b..1cd0aeb01f50e 100644
--- a/staging/src/k8s.io/code-generator/examples/go.sum
+++ b/staging/src/k8s.io/code-generator/examples/go.sum
@@ -57,10 +57,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod
index 1e23a31ca4320..832db3d7e60c6 100644
--- a/staging/src/k8s.io/code-generator/go.mod
+++ b/staging/src/k8s.io/code-generator/go.mod
@@ -15,7 +15,7 @@ require (
 	github.com/google/gofuzz v1.2.0
 	github.com/spf13/pflag v1.0.5
 	golang.org/x/text v0.19.0
-	k8s.io/apimachinery v0.0.0
+	k8s.io/apimachinery v0.32.0
 	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
@@ -46,4 +46,7 @@
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
 )

-replace k8s.io/apimachinery => ../apimachinery
+replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+	k8s.io/apimachinery => ../apimachinery
+)
diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum
index 268ddf453291f..9d4d545999d31 100644
--- a/staging/src/k8s.io/code-generator/go.sum
+++ b/staging/src/k8s.io/code-generator/go.sum
@@ -60,10 +60,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod
index 314a8e221796a..4d685211e615d 100644
--- a/staging/src/k8s.io/component-base/go.mod
+++ b/staging/src/k8s.io/component-base/go.mod
@@ -29,8 +29,8 @@ require (
 	go.opentelemetry.io/otel/trace v1.28.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/sys v0.26.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/client-go v0.0.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/client-go v0.32.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
@@ -81,12 +81,13 @@ require (
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.0.0 // indirect
+	k8s.io/api v0.32.0 // indirect
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum
index da154e5cc24e2..6497c2b58283d 100644
--- a/staging/src/k8s.io/component-base/go.sum
+++ b/staging/src/k8s.io/component-base/go.sum
@@ -106,10 +106,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod
index 45639a04e4c4d..ef59b9d0a4342 100644
--- a/staging/src/k8s.io/component-helpers/go.mod
+++ b/staging/src/k8s.io/component-helpers/go.mod
@@ -11,9 +11,9 @@ godebug winsymlink=0
 require (
 	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.9.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/client-go v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/client-go v0.32.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
@@ -57,6 +57,7 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/component-helpers/go.sum b/staging/src/k8s.io/component-helpers/go.sum
index eb695ac7be4dc..d38cca163f245 100644
--- a/staging/src/k8s.io/component-helpers/go.sum
+++ b/staging/src/k8s.io/component-helpers/go.sum
@@ -66,10 +66,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod
index 8af2b7eb8df1e..f3cc6be400fdc 100644
--- a/staging/src/k8s.io/controller-manager/go.mod
+++ b/staging/src/k8s.io/controller-manager/go.mod
@@ -12,11 +12,11 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
 	golang.org/x/oauth2 v0.23.0
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
-	k8s.io/apiserver v0.0.0
-	k8s.io/client-go v0.0.0
-	k8s.io/component-base v0.0.0
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/apiserver v0.32.0
+	k8s.io/client-go v0.32.0
+	k8s.io/component-base v0.32.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
@@ -79,6 +79,7 @@
 	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.28.0 // indirect
@@ -104,6 +105,8 @@
 )

 replace (
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+	github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
diff --git a/staging/src/k8s.io/controller-manager/go.sum b/staging/src/k8s.io/controller-manager/go.sum
index 723f969914195..ac846e8ef1aa6 100644
--- a/staging/src/k8s.io/controller-manager/go.sum
+++ b/staging/src/k8s.io/controller-manager/go.sum
@@ -33,6 +33,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -132,10 +133,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -218,6 +219,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/staging/src/k8s.io/cri-client/go.mod b/staging/src/k8s.io/cri-client/go.mod index 3619c649a9a57..5fafa46775b09 100644 --- a/staging/src/k8s.io/cri-client/go.mod +++ b/staging/src/k8s.io/cri-client/go.mod @@ -17,10 +17,10 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/sys v0.26.0 google.golang.org/grpc v1.65.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/cri-api v0.0.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -83,6 +83,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git 
a/staging/src/k8s.io/cri-client/go.sum b/staging/src/k8s.io/cri-client/go.sum index 0f949091505ba..dd0d12df07520 100644 --- a/staging/src/k8s.io/cri-client/go.sum +++ b/staging/src/k8s.io/cri-client/go.sum @@ -103,8 +103,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod index b35dfee98cf3c..717f1bee1f8ee 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.mod +++ b/staging/src/k8s.io/csi-translation-lib/go.mod @@ -10,8 +10,8 @@ godebug winsymlink=0 require ( github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 k8s.io/klog/v2 v2.130.1 ) @@ -39,6 +39,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery ) diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index ce95781b32c4c..04d191bf36c48 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -42,8 +42,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod index 6c6b54ba6972f..1745325e27d66 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod @@ -15,11 +15,11 @@ require ( github.com/onsi/gomega v1.35.1 github.com/stretchr/testify v1.9.0 
google.golang.org/grpc v1.65.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-helpers v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-helpers v0.32.0-rc.1 k8s.io/klog/v2 v2.130.1 k8s.io/kubelet v0.0.0 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -76,7 +76,7 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.0.0 // indirect + k8s.io/component-base v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect @@ -84,6 +84,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.sum b/staging/src/k8s.io/dynamic-resource-allocation/go.sum index 058dee2c6239c..e1f1394683d42 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.sum +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.sum @@ -28,6 +28,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -111,10 +112,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -177,6 +178,7 @@ go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8 go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= diff --git a/staging/src/k8s.io/endpointslice/go.mod b/staging/src/k8s.io/endpointslice/go.mod index 186bf8ab65c27..872c0db28fc4b 100644 --- a/staging/src/k8s.io/endpointslice/go.mod +++ b/staging/src/k8s.io/endpointslice/go.mod @@ -11,10 +11,10 @@ godebug winsymlink=0 require ( github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) @@ -68,6 +68,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/endpointslice/go.sum b/staging/src/k8s.io/endpointslice/go.sum index 886f63594bf7f..856c30a737069 100644 --- a/staging/src/k8s.io/endpointslice/go.sum +++ b/staging/src/k8s.io/endpointslice/go.sum @@ -87,10 +87,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod index 24aa2bdf7013e..a4520f3a65c03 100644 --- a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod @@ -23,3 +23,13 @@ require ( ) replace k8s.io/kms => ../../../../kms + +replace github.com/onsi/ginkgo/v2 => 
github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + +replace github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + +replace github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + +replace github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 + +replace github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index f40b6ef51a4f2..272e21a9af713 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -13,6 +13,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 @@ -20,12 +21,12 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/net v0.30.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/code-generator v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -67,8 +68,8 @@ require ( github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -86,6 +87,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -107,14 +109,19 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => 
../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index 77d538064ab84..bb64049b82c0a 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -2,8 +2,10 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -34,19 +36,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -73,6 +87,12 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -89,6 +109,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -100,6 +121,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod 
h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -135,17 +158,24 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -154,6 +184,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -222,6 +253,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -314,6 +346,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index dc62a84f39966..2047e991eeec4 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -9,9 +9,9 @@ godebug default=go1.23 godebug winsymlink=0 require ( - k8s.io/apimachinery v0.0.0 + k8s.io/apimachinery v0.32.0 k8s.io/cloud-provider v0.0.0 - k8s.io/controller-manager v0.0.0 + k8s.io/controller-manager v0.32.0-rc.1 ) require ( @@ -29,7 +29,7 @@ require ( golang.org/x/text v0.19.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/component-base v0.0.0 // indirect + k8s.io/component-base v0.32.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect @@ -38,6 +38,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 3c66da350b101..804878159b648 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -15,6 +15,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -67,8 +68,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -105,6 +106,7 @@ go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUis go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index 21293460f9aae..b545e05ff04c0 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -9,8 +9,8 @@ godebug default=go1.23 godebug winsymlink=0 require ( - k8s.io/apimachinery v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/apimachinery v0.32.0 + k8s.io/component-base v0.32.0 ) require ( @@ -51,6 +51,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 5e15223a67a35..f13925e2910a7 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -67,8 +67,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index 31b3cf1a3a271..4307508ebdde3 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -10,9 +10,9 @@ godebug winsymlink=0 require ( github.com/google/go-cmp v0.6.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/component-base v0.32.0 sigs.k8s.io/yaml v1.4.0 ) @@ -36,6 +36,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 56805d17234f2..4a1a138b3f6d6 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -54,8 +54,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod index 40dcb04b9d93c..9d3e39c5b1041 100644 --- a/staging/src/k8s.io/kubectl/go.mod +++ b/staging/src/k8s.io/kubectl/go.mod @@ -22,7 +22,7 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/mitchellh/go-wordwrap v1.0.1 github.com/moby/term v0.5.0 - github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/ginkgo/v2 v2.20.1 github.com/onsi/gomega v1.35.1 github.com/pkg/errors v0.9.1 github.com/russross/blackfriday/v2 v2.1.0 @@ -31,12 +31,12 @@ require ( github.com/stretchr/testify v1.9.0 golang.org/x/sys v0.26.0 gopkg.in/evanphx/json-patch.v4 v4.12.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 + k8s.io/api v0.32.0 + 
k8s.io/apimachinery v0.32.0 k8s.io/cli-runtime v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 - k8s.io/component-helpers v0.0.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 + k8s.io/component-helpers v0.32.0-rc.1 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/metrics v0.0.0 @@ -97,6 +97,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/cli-runtime => ../cli-runtime diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum index f7008ed6bba77..e1104bcf3146d 100644 --- a/staging/src/k8s.io/kubectl/go.sum +++ b/staging/src/k8s.io/kubectl/go.sum @@ -116,12 +116,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod index 3e3bc92918e2b..9001f65538b38 100644 --- a/staging/src/k8s.io/kubelet/go.mod +++ b/staging/src/k8s.io/kubelet/go.mod @@ -13,11 +13,11 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.9.0 google.golang.org/grpc v1.65.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/cri-api v0.0.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 @@ -67,6 +67,8 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 789ef1b188873..bb91712a46db7 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -26,6 +26,7 
@@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -106,9 +107,9 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -165,6 +166,7 @@ go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8 go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod index 192a37946b86f..e54761971d44a 100644 --- a/staging/src/k8s.io/metrics/go.mod +++ b/staging/src/k8s.io/metrics/go.mod @@ -11,10 +11,10 @@ godebug winsymlink=0 require ( github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/code-generator v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/code-generator v0.32.0 ) require ( @@ -63,6 +63,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git 
a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index 15be7434f3223..bfeda1c02887c 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -66,10 +66,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod index ebb5e81bb1c2e..3c8adcaacd372 100644 --- a/staging/src/k8s.io/pod-security-admission/go.mod +++ b/staging/src/k8s.io/pod-security-admission/go.mod @@ -14,11 +14,11 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/yaml v1.4.0 @@ -61,6 +61,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -81,6 +82,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -100,7 +102,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect @@ -108,10 +110,16 @@ require ( ) replace ( + 
github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go k8s.io/component-base => ../component-base k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/pod-security-admission/go.sum b/staging/src/k8s.io/pod-security-admission/go.sum index 723f969914195..f5cb0bdda8d61 100644 --- a/staging/src/k8s.io/pod-security-admission/go.sum +++ b/staging/src/k8s.io/pod-security-admission/go.sum @@ -2,8 +2,10 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -33,19 +35,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 
h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -72,6 +86,12 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -88,6 +108,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -99,6 +120,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -132,17 +155,24 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod 
h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -151,6 +181,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -218,6 +249,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -307,6 +339,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index df3797c3f894e..c9c030c5d6aaa 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -12,11 +12,11 @@ require ( github.com/google/gofuzz v1.2.0 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 - k8s.io/apimachinery v0.0.0 - k8s.io/apiserver v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/code-generator v0.0.0 - k8s.io/component-base v0.0.0 + k8s.io/apimachinery v0.32.0 + k8s.io/apiserver v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/code-generator v0.32.0 + k8s.io/component-base v0.32.0 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 @@ -59,6 +59,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -80,6 +81,7 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -101,21 +103,27 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.0.0 // indirect + k8s.io/api v0.32.0 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kms v0.0.0 // indirect + k8s.io/kms v0.32.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 + github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e + github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 + github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 k8s.io/api => ../api + k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery k8s.io/apiserver => ../apiserver k8s.io/client-go => ../client-go k8s.io/code-generator => ../code-generator k8s.io/component-base => ../component-base k8s.io/kms => ../kms + k8s.io/kube-aggregator => ../kube-aggregator ) diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index 32e5c2b5f30e8..5eca33f3368a9 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -2,8 +2,10 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -33,19 +35,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20230511163743-f7717b7855ca/go.mod h1:t1IxPNGdTGez+YGKyJyQrtSSqisfMIm1hnFhvMPlxtE= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/dusk125/api v0.0.0-20241212053709-6b333900129e/go.mod h1:lvUN3WEfcZlZxWNEhBKGAbW1UqaIexBLqcYIMXQDh2c= +github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385/go.mod h1:yv2o2+uOZRWD4E30SHdQ66mtcpV1qL0Px03vYjrvM4s= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 h1:83mHQ9+8+Fd+6Zb5aNPiUhgjCUiRCHbe6HuTFA2us78= +github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9/go.mod h1:vbBfvIsLddBDFa0WF+id4m7KuQmNRsVUBH5zIZa2EcQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -72,6 +86,12 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph 
v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= @@ -88,6 +108,7 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -99,6 +120,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -132,17 +155,24 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/build-machinery-go 
v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -151,6 +181,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -219,6 +250,7 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -311,6 +343,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 
h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod index 38b33ba9042e6..1616ef2bfdc6c 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.mod +++ b/staging/src/k8s.io/sample-cli-plugin/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 k8s.io/cli-runtime v0.0.0 - k8s.io/client-go v0.0.0 + k8s.io/client-go v0.32.0 ) require ( @@ -60,8 +60,8 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.0.0 // indirect - k8s.io/apimachinery v0.0.0 // indirect + k8s.io/api v0.32.0 // indirect + k8s.io/apimachinery v0.32.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect @@ -73,6 +73,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/cli-runtime => ../cli-runtime diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index fdd3895bceb06..bb7f2aa389f5a 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -87,10 +87,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod index d0d27ffb89a19..a77f5ae50ecca 100644 --- a/staging/src/k8s.io/sample-controller/go.mod +++ b/staging/src/k8s.io/sample-controller/go.mod @@ -10,10 +10,10 @@ godebug winsymlink=0 require ( golang.org/x/time v0.7.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/code-generator v0.0.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/code-generator v0.32.0 k8s.io/klog/v2 v2.130.1 ) @@ -61,6 +61,7 @@ require ( ) replace ( + github.com/onsi/ginkgo/v2 => 
github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 k8s.io/api => ../api k8s.io/apimachinery => ../apimachinery k8s.io/client-go => ../client-go diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index a50a01ece01dd..04406641bd110 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -66,10 +66,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= +github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/OWNERS b/vendor/OWNERS index 163a7393ee884..048c98615c487 100644 --- a/vendor/OWNERS +++ b/vendor/OWNERS @@ -1,4 +1,4 @@ -# See the OWNERS docs at https://go.k8s.io/owners +# See the OWNERS docs at https://go.k8s.io/owners options: # make root approval non-recursive diff --git a/vendor/github.com/Azure/go-ntlmssp/.travis.yml b/vendor/github.com/Azure/go-ntlmssp/.travis.yml new file mode 100644 index 0000000000000..23c95fe951bc0 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/.travis.yml @@ -0,0 +1,17 @@ +sudo: false + +language: go + +before_script: + - go get -u golang.org/x/lint/golint + +go: + - 1.10.x + - master + +script: + - test -z "$(gofmt -s -l . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - go vet ./... + - go build -v ./... + - go test -v ./... diff --git a/vendor/github.com/Azure/go-ntlmssp/LICENSE b/vendor/github.com/Azure/go-ntlmssp/LICENSE new file mode 100644 index 0000000000000..dc1cf39d1359b --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Azure/go-ntlmssp/README.md b/vendor/github.com/Azure/go-ntlmssp/README.md new file mode 100644 index 0000000000000..55cdcefab7053 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/README.md @@ -0,0 +1,29 @@ +# go-ntlmssp +Golang package that provides NTLM/Negotiate authentication over HTTP + +[![GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp?status.svg)](https://godoc.org/github.com/Azure/go-ntlmssp) [![Build Status](https://travis-ci.org/Azure/go-ntlmssp.svg?branch=dev)](https://travis-ci.org/Azure/go-ntlmssp) + +Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx +Implementation hints from http://davenport.sourceforge.net/ntlm.html + +This package only implements authentication, no key exchange or encryption. It +only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +This package implements NTLMv2. + +# Usage + +``` +url, user, password := "http://www.example.com/secrets", "robpike", "pw123" +client := &http.Client{ + Transport: ntlmssp.Negotiator{ + RoundTripper:&http.Transport{}, + }, +} + +req, _ := http.NewRequest("GET", url, nil) +req.SetBasicAuth(user, password) +res, _ := client.Do(req) +``` + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
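[Editor's note — not part of the vendored diff] The README snippet above discards every error for brevity. For reviewers who want to exercise the round tripper this PR vendors, here is a minimal self-contained sketch of the same flow with imports and error handling added. The URL and credentials are placeholders; the only package API it relies on is the `ntlmssp.Negotiator` type defined in `negotiator.go` below.

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/Azure/go-ntlmssp"
)

func main() {
	// Placeholder endpoint and credentials -- substitute your own.
	url, user, password := "http://www.example.com/secrets", "robpike", "pw123"

	// Negotiator wraps a RoundTripper and upgrades Basic auth to
	// NTLM/Negotiate when the server answers 401 with a matching
	// Www-Authenticate header.
	client := &http.Client{
		Transport: ntlmssp.Negotiator{
			RoundTripper: &http.Transport{},
		},
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	// The handshake only triggers for requests carrying Basic credentials.
	req.SetBasicAuth(user, password)

	res, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Status, len(body), "bytes")
}
```

Note that `Negotiator.RoundTrip` (see `negotiator.go` further down in this diff) passes requests through unchanged unless they already carry a Basic Authorization header, so the `req.SetBasicAuth` call is what arms the NTLM/Negotiate exchange.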
diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go new file mode 100644 index 0000000000000..1b0fe7dd22bd4 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go @@ -0,0 +1,183 @@ +package ntlmssp + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "encoding/hex" + "errors" + "strings" + "time" +) + +type authenicateMessage struct { + LmChallengeResponse []byte + NtChallengeResponse []byte + + TargetName string + UserName string + + // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH + EncryptedRandomSessionKey []byte + + NegotiateFlags negotiateFlags + + MIC []byte +} + +type authenticateMessageFields struct { + messageHeader + LmChallengeResponse varField + NtChallengeResponse varField + TargetName varField + UserName varField + Workstation varField + _ [8]byte + NegotiateFlags negotiateFlags +} + +func (m authenicateMessage) MarshalBinary() ([]byte, error) { + if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) { + return nil, errors.New("Only unicode is supported") + } + + target, user := toUnicode(m.TargetName), toUnicode(m.UserName) + workstation := toUnicode("") + + ptr := binary.Size(&authenticateMessageFields{}) + f := authenticateMessageFields{ + messageHeader: newMessageHeader(3), + NegotiateFlags: m.NegotiateFlags, + LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)), + NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)), + TargetName: newVarField(&ptr, len(target)), + UserName: newVarField(&ptr, len(user)), + Workstation: newVarField(&ptr, len(workstation)), + } + + f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION) + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &f); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &target); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &user); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil { + return nil, err + } + + return b.Bytes(), nil +} + +//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message +//that was received from the server +func ProcessChallenge(challengeMessageData []byte, user, password string) ([]byte, error) { + if user == "" && password == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & windows offset + timestamp = make([]byte, 8) + 
binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + return am.MarshalBinary() +} + +func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) { + if user == "" && hash == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & windows offset + timestamp = make([]byte, 8) + binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + hashParts := strings.Split(hash, ":") + if len(hashParts) > 1 { + hash = hashParts[1] + } + hashBytes, err := hex.DecodeString(hash) + if err != nil { + return nil, err + } + ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName)) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + return am.MarshalBinary() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/authheader.go b/vendor/github.com/Azure/go-ntlmssp/authheader.go new file mode 100644 index 0000000000000..c9d30d32421c2 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authheader.go @@ -0,0 +1,66 @@ +package ntlmssp + +import ( + "encoding/base64" + "strings" +) + +type authheader []string + +func (h authheader) IsBasic() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "Basic ") { + return true + } + } + return false +} + +func (h authheader) Basic() string { + for _, s := range h { + if strings.HasPrefix(string(s), "Basic ") { + return s + } + } + return "" +} + +func (h authheader) IsNegotiate() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "Negotiate") { + return true + } + } + return false +} + +func (h authheader) IsNTLM() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "NTLM") { + return true + } + } + return false +} + +func (h authheader) GetData() ([]byte, error) { + for _, s := range h { + if strings.HasPrefix(string(s), "NTLM") || strings.HasPrefix(string(s), "Negotiate") || strings.HasPrefix(string(s), "Basic ") { + p := strings.Split(string(s), " ") + if len(p) < 2 { + return nil, nil + } + return base64.StdEncoding.DecodeString(string(p[1])) + } + } + return nil, nil +} + +func (h authheader) 
GetBasicCreds() (username, password string, err error) { + d, err := h.GetData() + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(d), ":", 2) + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/avids.go b/vendor/github.com/Azure/go-ntlmssp/avids.go new file mode 100644 index 0000000000000..196b5f131635f --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/avids.go @@ -0,0 +1,17 @@ +package ntlmssp + +type avID uint16 + +const ( + avIDMsvAvEOL avID = iota + avIDMsvAvNbComputerName + avIDMsvAvNbDomainName + avIDMsvAvDNSComputerName + avIDMsvAvDNSDomainName + avIDMsvAvDNSTreeName + avIDMsvAvFlags + avIDMsvAvTimestamp + avIDMsvAvSingleHost + avIDMsvAvTargetName + avIDMsvChannelBindings +) diff --git a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go new file mode 100644 index 0000000000000..053b55e4adfbf --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go @@ -0,0 +1,82 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "fmt" +) + +type challengeMessageFields struct { + messageHeader + TargetName varField + NegotiateFlags negotiateFlags + ServerChallenge [8]byte + _ [8]byte + TargetInfo varField +} + +func (m challengeMessageFields) IsValid() bool { + return m.messageHeader.IsValid() && m.MessageType == 2 +} + +type challengeMessage struct { + challengeMessageFields + TargetName string + TargetInfo map[avID][]byte + TargetInfoRaw []byte +} + +func (m *challengeMessage) UnmarshalBinary(data []byte) error { + r := bytes.NewReader(data) + err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields) + if err != nil { + return err + } + if !m.challengeMessageFields.IsValid() { + return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader) + } + + if m.challengeMessageFields.TargetName.Len > 0 { + m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE)) + if err != nil { + return err + } + } + + if m.challengeMessageFields.TargetInfo.Len > 0 { + d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data) + m.TargetInfoRaw = d + if err != nil { + return err + } + m.TargetInfo = make(map[avID][]byte) + r := bytes.NewReader(d) + for { + var id avID + var l uint16 + err = binary.Read(r, binary.LittleEndian, &id) + if err != nil { + return err + } + if id == avIDMsvAvEOL { + break + } + + err = binary.Read(r, binary.LittleEndian, &l) + if err != nil { + return err + } + value := make([]byte, l) + n, err := r.Read(value) + if err != nil { + return err + } + if n != int(l) { + return fmt.Errorf("Expected to read %d bytes, got only %d", l, n) + } + m.TargetInfo[id] = value + } + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/messageheader.go b/vendor/github.com/Azure/go-ntlmssp/messageheader.go new file mode 100644 index 0000000000000..247e284652cb3 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/messageheader.go @@ -0,0 +1,21 @@ +package ntlmssp + +import ( + "bytes" +) + +var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0} + +type messageHeader struct { + Signature [8]byte + MessageType uint32 +} + +func (h messageHeader) IsValid() bool { + return bytes.Equal(h.Signature[:], signature[:]) && + h.MessageType > 0 && h.MessageType < 4 +} + +func newMessageHeader(messageType uint32) messageHeader { + return messageHeader{signature, messageType} +} diff --git 
a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go new file mode 100644 index 0000000000000..5905c023d69ef --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go @@ -0,0 +1,52 @@ +package ntlmssp + +type negotiateFlags uint32 + +const ( + /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0 + /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1 + /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2 + + /*D*/ + negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 + /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5 + /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6 + /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7 + + /*H*/ + negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 + + /*J*/ + negotiateFlagANONYMOUS = 1 << 11 + /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12 + /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13 + + /*M*/ + negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 + /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16 + /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17 + + /*P*/ + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 + /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20 + + /*R*/ + negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 + /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23 + + /*T*/ + negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 + + /*U*/ + negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 + /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30 + /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31 +) + +func (field negotiateFlags) Has(flags negotiateFlags) bool { + return field&flags == flags +} + +func (field *negotiateFlags) Unset(flags negotiateFlags) { + *field = *field ^ (*field & flags) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go new file mode 100644 index 0000000000000..e466a9861d83c --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go @@ -0,0 +1,64 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "strings" +) + +const expMsgBodyLen = 40 + +type negotiateMessageFields struct { + messageHeader + NegotiateFlags negotiateFlags + + Domain varField + Workstation varField + + Version +} + +var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO | + negotiateFlagNTLMSSPNEGOTIATE56 | + negotiateFlagNTLMSSPNEGOTIATE128 | + negotiateFlagNTLMSSPNEGOTIATEUNICODE | + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY + +//NewNegotiateMessage creates a new NEGOTIATE message with the +//flags that this package supports. 
+func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) { + payloadOffset := expMsgBodyLen + flags := defaultFlags + + if domainName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED + } + + if workstationName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED + } + + msg := negotiateMessageFields{ + messageHeader: newMessageHeader(1), + NegotiateFlags: flags, + Domain: newVarField(&payloadOffset, len(domainName)), + Workstation: newVarField(&payloadOffset, len(workstationName)), + Version: DefaultVersion(), + } + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil { + return nil, err + } + if b.Len() != expMsgBodyLen { + return nil, errors.New("incorrect body length") + } + + payload := strings.ToUpper(domainName + workstationName) + if _, err := b.WriteString(payload); err != nil { + return nil, err + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go new file mode 100644 index 0000000000000..a5a5f5b750037 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go @@ -0,0 +1,144 @@ +package ntlmssp + +import ( + "bytes" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// GetDomain : parse domain name from based on slashes in the input +func GetDomain(user string) (string, string) { + domain := "" + + if strings.Contains(user, "\\") { + ucomponents := strings.SplitN(user, "\\", 2) + domain = ucomponents[0] + user = ucomponents[1] + } + return user, domain +} + +//Negotiator is a http.Roundtripper decorator that automatically +//converts basic authentication to NTLM/Negotiate authentication when appropriate. +type Negotiator struct{ http.RoundTripper } + +//RoundTrip sends the request to the server, handling any authentication +//re-sends as needed. 
+func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) { + // Use default round tripper if not provided + rt := l.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + // If it is not basic auth, just round trip the request as usual + reqauth := authheader(req.Header.Values("Authorization")) + if !reqauth.IsBasic() { + return rt.RoundTrip(req) + } + reqauthBasic := reqauth.Basic() + // Save request body + body := bytes.Buffer{} + if req.Body != nil { + _, err = body.ReadFrom(req.Body) + if err != nil { + return nil, err + } + + req.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + } + // first try anonymous, in case the server still finds us + // authenticated from previous traffic + req.Header.Del("Authorization") + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth := authheader(res.Header.Values("Www-Authenticate")) + if !resauth.IsNegotiate() && !resauth.IsNTLM() { + // Unauthorized, Negotiate not requested, let's try with basic auth + req.Header.Set("Authorization", string(reqauthBasic)) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth = authheader(res.Header.Values("Www-Authenticate")) + } + + if resauth.IsNegotiate() || resauth.IsNTLM() { + // 401 with request:Basic and response:Negotiate + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // recycle credentials + u, p, err := reqauth.GetBasicCreds() + if err != nil { + return nil, err + } + + // get domain from username + domain := "" + u, domain = GetDomain(u) + + // send negotiate + negotiateMessage, err := NewNegotiateMessage(domain, "") + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + + // receive challenge? 
+ resauth = authheader(res.Header.Values("Www-Authenticate")) + challengeMessage, err := resauth.GetData() + if err != nil { + return nil, err + } + if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 { + // Negotiation failed, let client deal with response + return res, nil + } + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // send authenticate + authenticateMessage, err := ProcessChallenge(challengeMessage, u, p) + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + return rt.RoundTrip(req) + } + + return res, err +} diff --git a/vendor/github.com/Azure/go-ntlmssp/nlmp.go b/vendor/github.com/Azure/go-ntlmssp/nlmp.go new file mode 100644 index 0000000000000..1e65abe8b5351 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/nlmp.go @@ -0,0 +1,51 @@ +// Package ntlmssp provides NTLM/Negotiate authentication over HTTP +// +// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx, +// implementation hints from http://davenport.sourceforge.net/ntlm.html . +// This package only implements authentication, no key exchange or encryption. It +// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +// This package implements NTLMv2. +package ntlmssp + +import ( + "crypto/hmac" + "crypto/md5" + "golang.org/x/crypto/md4" + "strings" +) + +func getNtlmV2Hash(password, username, target string) []byte { + return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target)) +} + +func getNtlmHash(password string) []byte { + hash := md4.New() + hash.Write(toUnicode(password)) + return hash.Sum(nil) +} + +func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge, + timestamp, targetInfo []byte) []byte { + + temp := []byte{1, 1, 0, 0, 0, 0, 0, 0} + temp = append(temp, timestamp...) + temp = append(temp, clientChallenge...) + temp = append(temp, 0, 0, 0, 0) + temp = append(temp, targetInfo...) + temp = append(temp, 0, 0, 0, 0) + + NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp) + return append(NTProofStr, temp...) +} + +func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte { + return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...) 
+} + +func hmacMd5(key []byte, data ...[]byte) []byte { + mac := hmac.New(md5.New, key) + for _, d := range data { + mac.Write(d) + } + return mac.Sum(nil) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/unicode.go b/vendor/github.com/Azure/go-ntlmssp/unicode.go new file mode 100644 index 0000000000000..7b4f47163d0e1 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/unicode.go @@ -0,0 +1,29 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "unicode/utf16" +) + +// helper func's for dealing with Windows Unicode (UTF16LE) + +func fromUnicode(d []byte) (string, error) { + if len(d)%2 > 0 { + return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length") + } + s := make([]uint16, len(d)/2) + err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s) + if err != nil { + return "", err + } + return string(utf16.Decode(s)), nil +} + +func toUnicode(s string) []byte { + uints := utf16.Encode([]rune(s)) + b := bytes.Buffer{} + binary.Write(&b, binary.LittleEndian, &uints) + return b.Bytes() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/varfield.go b/vendor/github.com/Azure/go-ntlmssp/varfield.go new file mode 100644 index 0000000000000..15f9aa113d8ba --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/varfield.go @@ -0,0 +1,40 @@ +package ntlmssp + +import ( + "errors" +) + +type varField struct { + Len uint16 + MaxLen uint16 + BufferOffset uint32 +} + +func (f varField) ReadFrom(buffer []byte) ([]byte, error) { + if len(buffer) < int(f.BufferOffset+uint32(f.Len)) { + return nil, errors.New("Error reading data, varField extends beyond buffer") + } + return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil +} + +func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) { + d, err := f.ReadFrom(buffer) + if err != nil { + return "", err + } + if unicode { // UTF-16LE encoding scheme + return fromUnicode(d) + } + // OEM encoding, close enough to ASCII, since no code page is specified + return string(d), err +} + +func newVarField(ptr *int, fieldsize int) varField { + f := varField{ + Len: uint16(fieldsize), + MaxLen: uint16(fieldsize), + BufferOffset: uint32(*ptr), + } + *ptr += fieldsize + return f +} diff --git a/vendor/github.com/Azure/go-ntlmssp/version.go b/vendor/github.com/Azure/go-ntlmssp/version.go new file mode 100644 index 0000000000000..6d848921244d8 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/version.go @@ -0,0 +1,20 @@ +package ntlmssp + +// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx +type Version struct { + ProductMajorVersion uint8 + ProductMinorVersion uint8 + ProductBuild uint16 + _ [3]byte + NTLMRevisionCurrent uint8 +} + +// DefaultVersion returns a Version with "sensible" defaults (Windows 7) +func DefaultVersion() Version { + return Version{ + ProductMajorVersion: 6, + ProductMinorVersion: 1, + ProductBuild: 7601, + NTLMRevisionCurrent: 15, + } +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE new file mode 100644 index 0000000000000..23f942534570e --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-asn1-ber Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including 
without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/README.md b/vendor/github.com/go-asn1-ber/asn1-ber/README.md new file mode 100644 index 0000000000000..e3a9560d6814b --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/README.md @@ -0,0 +1,24 @@ +[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber) + + +ASN1 BER Encoding / Decoding Library for the Go programming language. +--------------------------------------------------------------------- + +Required libraries: + None + +Working: + Very basic encoding / decoding needed for LDAP protocol + +Tests Implemented: + A few + +TODO: + Fix all encoding / decoding to conform to ASN1 BER spec + Implement Tests / Benchmarks + +--- + +The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/) +The design is licensed under the Creative Commons Attribution 3.0 license. +Read this article for more details: http://blog.golang.org/gopher diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/ber.go b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go new file mode 100644 index 0000000000000..4fd7a66e18f37 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go @@ -0,0 +1,620 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "time" + "unicode/utf8" +) + +// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for +// no limit.
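+// For example, a caller worried about memory use might set MaxPacketLengthBytes = 64 << 20
+// to cap packets at 64 MiB; that figure is purely illustrative.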
+var MaxPacketLengthBytes int64 = math.MaxInt32 + +type Packet struct { + Identifier + Value interface{} + ByteValue []byte + Data *bytes.Buffer + Children []*Packet + Description string +} + +type Identifier struct { + ClassType Class + TagType Type + Tag Tag +} + +type Tag uint64 + +const ( + TagEOC Tag = 0x00 + TagBoolean Tag = 0x01 + TagInteger Tag = 0x02 + TagBitString Tag = 0x03 + TagOctetString Tag = 0x04 + TagNULL Tag = 0x05 + TagObjectIdentifier Tag = 0x06 + TagObjectDescriptor Tag = 0x07 + TagExternal Tag = 0x08 + TagRealFloat Tag = 0x09 + TagEnumerated Tag = 0x0a + TagEmbeddedPDV Tag = 0x0b + TagUTF8String Tag = 0x0c + TagRelativeOID Tag = 0x0d + TagSequence Tag = 0x10 + TagSet Tag = 0x11 + TagNumericString Tag = 0x12 + TagPrintableString Tag = 0x13 + TagT61String Tag = 0x14 + TagVideotexString Tag = 0x15 + TagIA5String Tag = 0x16 + TagUTCTime Tag = 0x17 + TagGeneralizedTime Tag = 0x18 + TagGraphicString Tag = 0x19 + TagVisibleString Tag = 0x1a + TagGeneralString Tag = 0x1b + TagUniversalString Tag = 0x1c + TagCharacterString Tag = 0x1d + TagBMPString Tag = 0x1e + TagBitmask Tag = 0x1f // xxx11111b + + // HighTag indicates the start of a high-tag byte sequence + HighTag Tag = 0x1f // xxx11111b + // HighTagContinueBitmask indicates the high-tag byte sequence should continue + HighTagContinueBitmask Tag = 0x80 // 10000000b + // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte + HighTagValueBitmask Tag = 0x7f // 01111111b +) + +const ( + // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used + LengthLongFormBitmask = 0x80 + // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence + LengthValueBitmask = 0x7f + + // LengthIndefinite is returned from readLength to indicate an indefinite length + LengthIndefinite = -1 +) + +var tagMap = map[Tag]string{ + TagEOC: "EOC (End-of-Content)", + TagBoolean: "Boolean", + TagInteger: "Integer", + TagBitString: "Bit String", + TagOctetString: "Octet String", + TagNULL: "NULL", + TagObjectIdentifier: "Object Identifier", + TagObjectDescriptor: "Object Descriptor", + TagExternal: "External", + TagRealFloat: "Real (float)", + TagEnumerated: "Enumerated", + TagEmbeddedPDV: "Embedded PDV", + TagUTF8String: "UTF8 String", + TagRelativeOID: "Relative-OID", + TagSequence: "Sequence and Sequence of", + TagSet: "Set and Set OF", + TagNumericString: "Numeric String", + TagPrintableString: "Printable String", + TagT61String: "T61 String", + TagVideotexString: "Videotex String", + TagIA5String: "IA5 String", + TagUTCTime: "UTC Time", + TagGeneralizedTime: "Generalized Time", + TagGraphicString: "Graphic String", + TagVisibleString: "Visible String", + TagGeneralString: "General String", + TagUniversalString: "Universal String", + TagCharacterString: "Character String", + TagBMPString: "BMP String", +} + +type Class uint8 + +const ( + ClassUniversal Class = 0 // 00xxxxxxb + ClassApplication Class = 64 // 01xxxxxxb + ClassContext Class = 128 // 10xxxxxxb + ClassPrivate Class = 192 // 11xxxxxxb + ClassBitmask Class = 192 // 11xxxxxxb +) + +var ClassMap = map[Class]string{ + ClassUniversal: "Universal", + ClassApplication: "Application", + ClassContext: "Context", + ClassPrivate: "Private", +} + +type Type uint8 + +const ( + TypePrimitive Type = 0 // xx0xxxxxb + TypeConstructed Type = 32 // xx1xxxxxb + TypeBitmask Type = 32 // xx1xxxxxb +) + +var TypeMap = map[Type]string{ + TypePrimitive: "Primitive", + TypeConstructed: 
"Constructed", +} + +var Debug = false + +func PrintBytes(out io.Writer, buf []byte, indent string) { + dataLines := make([]string, (len(buf)/30)+1) + numLines := make([]string, (len(buf)/30)+1) + + for i, b := range buf { + dataLines[i/30] += fmt.Sprintf("%02x ", b) + numLines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) + } + + for i := 0; i < len(dataLines); i++ { + _, _ = out.Write([]byte(indent + dataLines[i] + "\n")) + _, _ = out.Write([]byte(indent + numLines[i] + "\n\n")) + } +} + +func WritePacket(out io.Writer, p *Packet) { + printPacket(out, p, 0, false) +} + +func PrintPacket(p *Packet) { + printPacket(os.Stdout, p, 0, false) +} + +func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { + indentStr := "" + + for len(indentStr) != indent { + indentStr += " " + } + + classStr := ClassMap[p.ClassType] + + tagTypeStr := TypeMap[p.TagType] + + tagStr := fmt.Sprintf("0x%02X", p.Tag) + + if p.ClassType == ClassUniversal { + tagStr = tagMap[p.Tag] + } + + value := fmt.Sprint(p.Value) + description := "" + + if p.Description != "" { + description = p.Description + ": " + } + + _, _ = fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indentStr, description, classStr, tagTypeStr, tagStr, p.Data.Len(), value) + + if printBytes { + PrintBytes(out, p.Bytes(), indentStr) + } + + for _, child := range p.Children { + printPacket(out, child, indent+1, printBytes) + } +} + +// ReadPacket reads a single Packet from the reader. +func ReadPacket(reader io.Reader) (*Packet, error) { + p, _, err := readPacket(reader) + if err != nil { + return nil, err + } + return p, nil +} + +func DecodeString(data []byte) string { + return string(data) +} + +func ParseInt64(bytes []byte) (ret int64, err error) { + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = fmt.Errorf("integer too large") + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +func encodeInteger(i int64) []byte { + n := int64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = byte(i >> uint((n-1)*8)) + j++ + } + + return out +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +// DecodePacket decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned. +func DecodePacket(data []byte) *Packet { + p, _, _ := readPacket(bytes.NewBuffer(data)) + + return p +} + +// DecodePacketErr decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned. +func DecodePacketErr(data []byte) (*Packet, error) { + p, _, err := readPacket(bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + return p, nil +} + +// readPacket reads a single Packet from the reader, returning the number of bytes read. 
+func readPacket(reader io.Reader) (*Packet, int, error) { + identifier, length, read, err := readHeader(reader) + if err != nil { + return nil, read, err + } + + p := &Packet{ + Identifier: identifier, + } + + p.Data = new(bytes.Buffer) + p.Children = make([]*Packet, 0, 2) + p.Value = nil + + if p.TagType == TypeConstructed { + // TODO: if universal, ensure tag type is allowed to be constructed + + // Track how much content we've read + contentRead := 0 + for { + if length != LengthIndefinite { + // End if we've read what we've been told to + if contentRead == length { + break + } + // Detect if a packet boundary didn't fall on the expected length + if contentRead > length { + return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead) + } + } + + // Read the next packet + child, r, err := readPacket(reader) + if err != nil { + return nil, read, err + } + contentRead += r + read += r + + // Test if this is the EOC marker for our packet + if isEOCPacket(child) { + if length == LengthIndefinite { + break + } + return nil, read, errors.New("eoc child not allowed with definite length") + } + + // Append and continue + p.AppendChild(child) + } + return p, read, nil + } + + if length == LengthIndefinite { + return nil, read, errors.New("indefinite length used with primitive type") + } + + // Read definite-length content + if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes { + return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes) + } + content := make([]byte, length) + if length > 0 { + _, err := io.ReadFull(reader, content) + if err != nil { + if err == io.EOF { + return nil, read, io.ErrUnexpectedEOF + } + return nil, read, err + } + read += length + } + + if p.ClassType == ClassUniversal { + p.Data.Write(content) + p.ByteValue = content + + switch p.Tag { + case TagEOC: + case TagBoolean: + val, _ := ParseInt64(content) + + p.Value = val != 0 + case TagInteger: + p.Value, _ = ParseInt64(content) + case TagBitString: + case TagOctetString: + // the actual string encoding is not known here + // (e.g. for LDAP the content is already a UTF-8-encoded + // string).
Return the data without further processing + p.Value = DecodeString(content) + case TagNULL: + case TagObjectIdentifier: + case TagObjectDescriptor: + case TagExternal: + case TagRealFloat: + p.Value, err = ParseReal(content) + case TagEnumerated: + p.Value, _ = ParseInt64(content) + case TagEmbeddedPDV: + case TagUTF8String: + val := DecodeString(content) + if !utf8.Valid([]byte(val)) { + err = errors.New("invalid UTF-8 string") + } else { + p.Value = val + } + case TagRelativeOID: + case TagSequence: + case TagSet: + case TagNumericString: + case TagPrintableString: + val := DecodeString(content) + if err = isPrintableString(val); err == nil { + p.Value = val + } + case TagT61String: + case TagVideotexString: + case TagIA5String: + val := DecodeString(content) + for i, c := range val { + if c >= 0x7F { + err = fmt.Errorf("invalid character for IA5String at pos %d: %c", i, c) + break + } + } + if err == nil { + p.Value = val + } + case TagUTCTime: + case TagGeneralizedTime: + p.Value, err = ParseGeneralizedTime(content) + case TagGraphicString: + case TagVisibleString: + case TagGeneralString: + case TagUniversalString: + case TagCharacterString: + case TagBMPString: + } + } else { + p.Data.Write(content) + } + + return p, read, err +} + +func isPrintableString(val string) error { + for i, c := range val { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c >= '0' && c <= '9': + default: + switch c { + case '\'', '(', ')', '+', ',', '-', '.', '=', '/', ':', '?', ' ': + default: + return fmt.Errorf("invalid character in position %d", i) + } + } + } + return nil +} + +func (p *Packet) Bytes() []byte { + var out bytes.Buffer + + out.Write(encodeIdentifier(p.Identifier)) + out.Write(encodeLength(p.Data.Len())) + out.Write(p.Data.Bytes()) + + return out.Bytes() +} + +func (p *Packet) AppendChild(child *Packet) { + p.Data.Write(child.Bytes()) + p.Children = append(p.Children, child) +} + +func Encode(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := new(Packet) + + p.ClassType = classType + p.TagType = tagType + p.Tag = tag + p.Data = new(bytes.Buffer) + + p.Children = make([]*Packet, 0, 2) + + p.Value = value + p.Description = description + + if value != nil { + v := reflect.ValueOf(value) + + if classType == ClassUniversal { + switch tag { + case TagOctetString: + sv, ok := v.Interface().(string) + + if ok { + p.Data.Write([]byte(sv)) + } + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + } + } else if classType == ClassContext { + switch tag { + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + } + } + } + return p +} + +func NewSequence(description string) *Packet { + return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, description) +} + +func NewBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { + intValue := int64(0) + + if value { + intValue = 1 + } + + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet. 
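+// RFC 4511 section 5.1 requires the value octet for a Boolean TRUE to be 0xFF,
+// which is why this constructor encodes 255 where NewBoolean encodes 1.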
+func NewLDAPBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { + intValue := int64(0) + + if value { + intValue = 255 + } + + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +func NewInteger(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + switch v := value.(type) { + case int: + p.Data.Write(encodeInteger(int64(v))) + case uint: + p.Data.Write(encodeInteger(int64(v))) + case int64: + p.Data.Write(encodeInteger(v)) + case uint64: + // TODO : check range or add encodeUInt... + p.Data.Write(encodeInteger(int64(v))) + case int32: + p.Data.Write(encodeInteger(int64(v))) + case uint32: + p.Data.Write(encodeInteger(int64(v))) + case int16: + p.Data.Write(encodeInteger(int64(v))) + case uint16: + p.Data.Write(encodeInteger(int64(v))) + case int8: + p.Data.Write(encodeInteger(int64(v))) + case uint8: + p.Data.Write(encodeInteger(int64(v))) + default: + // TODO : add support for big.Int ? + panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v)) + } + + return p +} + +func NewString(classType Class, tagType Type, tag Tag, value, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write([]byte(value)) + + return p +} + +func NewGeneralizedTime(classType Class, tagType Type, tag Tag, value time.Time, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + var s string + if value.Nanosecond() != 0 { + s = value.Format(`20060102150405.000000000Z`) + } else { + s = value.Format(`20060102150405Z`) + } + p.Value = s + p.Data.Write([]byte(s)) + return p +} + +func NewReal(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + switch v := value.(type) { + case float64: + p.Data.Write(encodeFloat(v)) + case float32: + p.Data.Write(encodeFloat(float64(v))) + default: + panic(fmt.Sprintf("Invalid type %T, expected float{64|32}", v)) + } + return p +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go new file mode 100644 index 0000000000000..20b500f553d1c --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go @@ -0,0 +1,25 @@ +package ber + +func encodeUnsignedInteger(i uint64) []byte { + n := uint64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = byte(i >> uint((n-1)*8)) + j++ + } + + return out +} + +func uint64Length(i uint64) (numBytes int) { + numBytes = 1 + + for i > 255 { + numBytes++ + i >>= 8 + } + + return +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go new file mode 100644 index 0000000000000..51215f0619524 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go @@ -0,0 +1,105 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "time" +) + +// ErrInvalidTimeFormat is returned when the generalizedTime string was not correct. +var ErrInvalidTimeFormat = errors.New("invalid time format") + +var zeroTime = time.Time{} + +// ParseGeneralizedTime parses a string value and if it conforms to +// GeneralizedTime[^0] format, will return a time.Time for that value. 
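+//
+// For example, "20240102150405Z" parses to 2024-01-02 15:04:05 UTC, and
+// "2024010215.5Z" (a fraction of an hour) parses to 2024-01-02 15:30:00 UTC.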
+// +// [^0]: https://www.itu.int/rec/T-REC-X.690-201508-I/en Section 11.7 +func ParseGeneralizedTime(v []byte) (time.Time, error) { + var format string + var fract time.Duration + + str := []byte(DecodeString(v)) + tzIndex := bytes.IndexAny(str, "Z+-") + if tzIndex < 0 { + return zeroTime, ErrInvalidTimeFormat + } + + dot := bytes.IndexAny(str, ".,") + switch dot { + case -1: + switch tzIndex { + case 10: + format = `2006010215Z` + case 12: + format = `200601021504Z` + case 14: + format = `20060102150405Z` + default: + return zeroTime, ErrInvalidTimeFormat + } + + case 10, 12: + if tzIndex < dot { + return zeroTime, ErrInvalidTimeFormat + } + // a "," is also allowed, but would not be parsed by time.Parse(): + str[dot] = '.' + + // If <minute> is omitted, then <fraction> represents a fraction of an + // hour; otherwise, if <second> and <leap-second> are omitted, then + // <fraction> represents a fraction of a minute; otherwise, <fraction> + // represents a fraction of a second. + + // parse as float from dot to timezone + f, err := strconv.ParseFloat(string(str[dot:tzIndex]), 64) + if err != nil { + return zeroTime, fmt.Errorf("failed to parse float: %s", err) + } + // ...and strip that part + str = append(str[:dot], str[tzIndex:]...) + tzIndex = dot + + if dot == 10 { + fract = time.Duration(int64(f * float64(time.Hour))) + format = `2006010215Z` + } else { + fract = time.Duration(int64(f * float64(time.Minute))) + format = `200601021504Z` + } + + case 14: + if tzIndex < dot { + return zeroTime, ErrInvalidTimeFormat + } + str[dot] = '.' + // no need for fractional seconds, time.Parse() handles that + format = `20060102150405Z` + + default: + return zeroTime, ErrInvalidTimeFormat + } + + l := len(str) + switch l - tzIndex { + case 1: + if str[l-1] != 'Z' { + return zeroTime, ErrInvalidTimeFormat + } + case 3: + format += `0700` + str = append(str, []byte("00")...)
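+ // the bare "+hh" offset was padded to "+hh00" above so the single `0700` format suffix covers both forms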
+ case 5: + format += `0700` + default: + return zeroTime, ErrInvalidTimeFormat + } + + t, err := time.Parse(format, string(str)) + if err != nil { + return zeroTime, fmt.Errorf("%s: %s", ErrInvalidTimeFormat, err) + } + return t.Add(fract), nil +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/header.go b/vendor/github.com/go-asn1-ber/asn1-ber/header.go new file mode 100644 index 0000000000000..7dfa6b9a7ddb6 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/header.go @@ -0,0 +1,38 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) { + var ( + c, l int + i Identifier + ) + + if i, c, err = readIdentifier(reader); err != nil { + return Identifier{}, 0, read, err + } + identifier = i + read += c + + if l, c, err = readLength(reader); err != nil { + return Identifier{}, 0, read, err + } + length = l + read += c + + // Validate length type with identifier (x.690, 8.1.3.2.a) + if length == LengthIndefinite && identifier.TagType == TypePrimitive { + return Identifier{}, 0, read, errors.New("indefinite length used with primitive type") + } + + if length < LengthIndefinite { + err = fmt.Errorf("length cannot be less than %d", LengthIndefinite) + return + } + + return identifier, length, read, nil +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go new file mode 100644 index 0000000000000..e8c435749a68a --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go @@ -0,0 +1,112 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readIdentifier(reader io.Reader) (Identifier, int, error) { + identifier := Identifier{} + read := 0 + + // identifier byte + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading identifier byte: %v\n", err) + } + return Identifier{}, read, err + } + read++ + + identifier.ClassType = Class(b) & ClassBitmask + identifier.TagType = Type(b) & TypeBitmask + + if tag := Tag(b) & TagBitmask; tag != HighTag { + // short-form tag + identifier.Tag = tag + return identifier, read, nil + } + + // high-tag-number tag + tagBytes := 0 + for { + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err) + } + return Identifier{}, read, err + } + tagBytes++ + read++ + + // Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b) + identifier.Tag <<= 7 + identifier.Tag |= Tag(b) & HighTagValueBitmask + + // First byte may not be all zeros (x.690, 8.1.2.4.2.c) + if tagBytes == 1 && identifier.Tag == 0 { + return Identifier{}, read, errors.New("invalid first high-tag-number tag byte") + } + // Overflow of int64 + // TODO: support big int tags? + if tagBytes > 9 { + return Identifier{}, read, errors.New("high-tag-number tag overflow") + } + + // Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a) + if Tag(b)&HighTagContinueBitmask == 0 { + break + } + } + + return identifier, read, nil +} + +func encodeIdentifier(identifier Identifier) []byte { + b := []byte{0x0} + b[0] |= byte(identifier.ClassType) + b[0] |= byte(identifier.TagType) + + if identifier.Tag < HighTag { + // Short-form + b[0] |= byte(identifier.Tag) + } else { + // high-tag-number + b[0] |= byte(HighTag) + + tag := identifier.Tag + + b = append(b, encodeHighTag(tag)...)
+ } + return b +} + +func encodeHighTag(tag Tag) []byte { + // set cap=4 to hopefully avoid additional allocations + b := make([]byte, 0, 4) + for tag != 0 { + // t := last 7 bits of tag (HighTagValueBitmask = 0x7F) + t := tag & HighTagValueBitmask + + // right shift tag 7 to remove what was just pulled off + tag >>= 7 + + // if b already has entries this entry needs a continuation bit (0x80) + if len(b) != 0 { + t |= HighTagContinueBitmask + } + + b = append(b, byte(t)) + } + // reverse + // since bits were pulled off 'tag' small to high the byte slice is in reverse order. + // example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)} + // this needs to be reversed into 0x81 0x7F + for i, j := 0, len(b)-1; i < len(b)/2; i++ { + b[i], b[j-i] = b[j-i], b[i] + } + return b +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go new file mode 100644 index 0000000000000..9cc195d0bdf17 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go @@ -0,0 +1,81 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readLength(reader io.Reader) (length int, read int, err error) { + // length byte + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading length byte: %v\n", err) + } + return 0, 0, err + } + read++ + + switch { + case b == 0xFF: + // Invalid 0xFF (x.690, 8.1.3.5.c) + return 0, read, errors.New("invalid length byte 0xff") + + case b == LengthLongFormBitmask: + // Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6) + length = LengthIndefinite + + case b&LengthLongFormBitmask == 0: + // Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4) + length = int(b) & LengthValueBitmask + + case b&LengthLongFormBitmask != 0: + // Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b) + lengthBytes := int(b) & LengthValueBitmask + // Protect against overflow + // TODO: support big int length? + if lengthBytes > 8 { + return 0, read, errors.New("long-form length overflow") + } + + // Accumulate into a 64-bit variable + var length64 int64 + for i := 0; i < lengthBytes; i++ { + b, err = readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading long-form length byte %d: %v\n", i, err) + } + return 0, read, err + } + read++ + + // x.690, 8.1.3.5 + length64 <<= 8 + length64 |= int64(b) + } + + // Cast to a platform-specific integer + length = int(length64) + // Ensure we didn't overflow + if int64(length) != length64 { + return 0, read, errors.New("long-form length overflow") + } + + default: + return 0, read, errors.New("invalid length byte") + } + + return length, read, nil +} + +func encodeLength(length int) []byte { + lengthBytes := encodeUnsignedInteger(uint64(length)) + if length > 127 || len(lengthBytes) > 1 { + longFormBytes := []byte{LengthLongFormBitmask | byte(len(lengthBytes))} + longFormBytes = append(longFormBytes, lengthBytes...)
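+ // e.g. length 300 encodes as 0x82 0x01 0x2C: 0x82 announces two length octets to follow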
+ lengthBytes = longFormBytes + } + return lengthBytes +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/real.go b/vendor/github.com/go-asn1-ber/asn1-ber/real.go new file mode 100644 index 0000000000000..610a003a73444 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/real.go @@ -0,0 +1,157 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" +) + +func encodeFloat(v float64) []byte { + switch { + case math.IsInf(v, 1): + return []byte{0x40} + case math.IsInf(v, -1): + return []byte{0x41} + case math.IsNaN(v): + return []byte{0x42} + case v == 0.0: + if math.Signbit(v) { + return []byte{0x43} + } + return []byte{} + default: + // we take the easy part ;-) + value := []byte(strconv.FormatFloat(v, 'G', -1, 64)) + var ret []byte + if bytes.Contains(value, []byte{'E'}) { + ret = []byte{0x03} + } else { + ret = []byte{0x02} + } + ret = append(ret, value...) + return ret + } +} + +func ParseReal(v []byte) (val float64, err error) { + if len(v) == 0 { + return 0.0, nil + } + switch { + case v[0]&0x80 == 0x80: + val, err = parseBinaryFloat(v) + case v[0]&0xC0 == 0x40: + val, err = parseSpecialFloat(v) + case v[0]&0xC0 == 0x0: + val, err = parseDecimalFloat(v) + default: + return 0.0, fmt.Errorf("invalid info block") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && !math.Signbit(val) { + return 0.0, errors.New("REAL value +0 must be encoded with zero-length value block") + } + return val, nil +} + +func parseBinaryFloat(v []byte) (float64, error) { + var info byte + var buf []byte + + info, v = v[0], v[1:] + + var base int + switch info & 0x30 { + case 0x00: + base = 2 + case 0x10: + base = 8 + case 0x20: + base = 16 + case 0x30: + return 0.0, errors.New("bits 6 and 5 of information octet for REAL are equal to 11") + } + + scale := uint((info & 0x0c) >> 2) + + var expLen int + switch info & 0x03 { + case 0x00: + expLen = 1 + case 0x01: + expLen = 2 + case 0x02: + expLen = 3 + case 0x03: + expLen = int(v[0]) + if expLen > 8 { + return 0.0, errors.New("too big value of exponent") + } + v = v[1:] + } + buf, v = v[:expLen], v[expLen:] + exponent, err := ParseInt64(buf) + if err != nil { + return 0.0, err + } + + if len(v) > 8 { + return 0.0, errors.New("too big value of mantissa") + } + + mant, err := ParseInt64(v) + if err != nil { + return 0.0, err + } + mantissa := mant << scale + + if info&0x40 == 0x40 { + mantissa = -mantissa + } + + return float64(mantissa) * math.Pow(float64(base), float64(exponent)), nil +} + +func parseDecimalFloat(v []byte) (val float64, err error) { + switch v[0] & 0x3F { + case 0x01: // NR form 1 + var iVal int64 + iVal, err = strconv.ParseInt(strings.TrimLeft(string(v[1:]), " "), 10, 64) + val = float64(iVal) + case 0x02, 0x03: // NR form 2, 3 + val, err = strconv.ParseFloat(strings.Replace(strings.TrimLeft(string(v[1:]), " "), ",", ".", -1), 64) + default: + err = errors.New("incorrect NR form") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && math.Signbit(val) { + return 0.0, errors.New("REAL value -0 must be encoded as a special value") + } + return val, nil +} + +func parseSpecialFloat(v []byte) (float64, error) { + if len(v) != 1 { + return 0.0, errors.New(`encoding of "special value" must not contain exponent and mantissa`) + } + switch v[0] { + case 0x40: + return math.Inf(1), nil + case 0x41: + return math.Inf(-1), nil + case 0x42: + return math.NaN(), nil + case 0x43: + return math.Copysign(0, -1), nil + } + return 0.0, errors.New(`encoding of "special value" not from ASN.1 
standard`) +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go new file mode 100644 index 0000000000000..14dc87d7c92a6 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go @@ -0,0 +1,24 @@ +package ber + +import "io" + +func readByte(reader io.Reader) (byte, error) { + bytes := make([]byte, 1) + _, err := io.ReadFull(reader, bytes) + if err != nil { + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + return bytes[0], nil +} + +func isEOCPacket(p *Packet) bool { + return p != nil && + p.Tag == TagEOC && + p.ClassType == ClassUniversal && + p.TagType == TypePrimitive && + len(p.ByteValue) == 0 && + len(p.Children) == 0 +} diff --git a/vendor/github.com/go-ldap/ldap/v3/LICENSE b/vendor/github.com/go-ldap/ldap/v3/LICENSE new file mode 100644 index 0000000000000..6c0ed4b387271 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
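For orientation only (a sketch, not part of the vendored files), this is how the ber package added above round-trips a packet; the main wrapper and the description strings are illustrative:

package main

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Build a SEQUENCE holding one INTEGER, serialize it, then decode it back.
	seq := ber.NewSequence("demo sequence")
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 5, "five"))

	decoded, err := ber.DecodePacketErr(seq.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Children[0].Value) // prints 5
}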
diff --git a/vendor/github.com/go-ldap/ldap/v3/add.go b/vendor/github.com/go-ldap/ldap/v3/add.go new file mode 100644 index 0000000000000..c3101b7628db1 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/add.go @@ -0,0 +1,89 @@ +package ldap + +import ( + ber "github.com/go-asn1-ber/asn1-ber" +) + +// Attribute represents an LDAP attribute +type Attribute struct { + // Type is the name of the LDAP attribute + Type string + // Vals are the LDAP attribute values + Vals []string +} + +func (a *Attribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range a.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// AddRequest represents an LDAP AddRequest operation +type AddRequest struct { + // DN identifies the entry being added + DN string + // Attributes list the attributes of the new entry + Attributes []Attribute + // Controls hold optional controls to send with the request + Controls []Control +} + +func (req *AddRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range req.Attributes { + attributes.AppendChild(attribute.encode()) + } + pkt.AppendChild(attributes) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// Attribute adds an attribute with the given type and values +func (req *AddRequest) Attribute(attrType string, attrVals []string) { + req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals}) +} + +// NewAddRequest returns an AddRequest for the given DN, with no attributes +func NewAddRequest(dn string, controls []Control) *AddRequest { + return &AddRequest{ + DN: dn, + Controls: controls, + } + +} + +// Add performs the given AddRequest +func (l *Conn) Add(addRequest *AddRequest) error { + msgCtx, err := l.doRequest(addRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationAddResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + logger.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/bind.go b/vendor/github.com/go-ldap/ldap/v3/bind.go new file mode 100644 index 0000000000000..9bc57482837b7 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/bind.go @@ -0,0 +1,540 @@ +package ldap + +import ( + "bytes" + "crypto/md5" + enchex "encoding/hex" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "strings" + + "github.com/Azure/go-ntlmssp" + ber "github.com/go-asn1-ber/asn1-ber" +) + +// SimpleBindRequest represents a username/password bind operation +type SimpleBindRequest struct { + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the 
credentials to bind with + Password string + // Controls are optional controls to send with the bind request + Controls []Control + // AllowEmptyPassword sets whether the client allows binding with an empty password + // (normally used for unauthenticated bind). + AllowEmptyPassword bool +} + +// SimpleBindResult contains the response from the server +type SimpleBindResult struct { + Controls []Control +} + +// NewSimpleBindRequest returns a bind request +func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest { + return &SimpleBindRequest{ + Username: username, + Password: password, + Controls: controls, + AllowEmptyPassword: false, + } +} + +func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name")) + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password")) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// SimpleBind performs the simple bind operation defined in the given request +func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) { + if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword { + return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) + } + + msgCtx, err := l.doRequest(simpleBindRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + + result := &SimpleBindResult{ + Controls: make([]Control, 0), + } + + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, decodeErr := DecodeControl(child) + if decodeErr != nil { + return nil, fmt.Errorf("failed to decode child control: %s", decodeErr) + } + result.Controls = append(result.Controls, decodedChild) + } + } + + err = GetLDAPError(packet) + return result, err +} + +// Bind performs a bind with the given username and password. +// +// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method +// for that. +func (l *Conn) Bind(username, password string) error { + req := &SimpleBindRequest{ + Username: username, + Password: password, + AllowEmptyPassword: false, + } + _, err := l.SimpleBind(req) + return err +} + +// UnauthenticatedBind performs an unauthenticated bind. +// +// A username may be provided for trace (e.g. logging) purpose only, but it is normally not +// authenticated or otherwise validated by the LDAP server. +// +// See https://tools.ietf.org/html/rfc4513#section-5.1.2 . +// See https://tools.ietf.org/html/rfc4513#section-6.3.1 . 
+func (l *Conn) UnauthenticatedBind(username string) error { + req := &SimpleBindRequest{ + Username: username, + Password: "", + AllowEmptyPassword: true, + } + _, err := l.SimpleBind(req) + return err +} + +// DigestMD5BindRequest represents a digest-md5 bind operation +type DigestMD5BindRequest struct { + Host string + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the credentials to bind with + Password string + // Controls are optional controls to send with the bind request + Controls []Control +} + +func (req *DigestMD5BindRequest) appendTo(envelope *ber.Packet) error { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication") + auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "DIGEST-MD5", "SASL Mech")) + request.AppendChild(auth) + envelope.AppendChild(request) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + return nil +} + +// DigestMD5BindResult contains the response from the server +type DigestMD5BindResult struct { + Controls []Control +} + +// MD5Bind performs a digest-md5 bind with the given host, username and password. +func (l *Conn) MD5Bind(host, username, password string) error { + req := &DigestMD5BindRequest{ + Host: host, + Username: username, + Password: password, + } + _, err := l.DigestMD5Bind(req) + return err +} + +// DigestMD5Bind performs the digest-md5 bind operation defined in the given request +func (l *Conn) DigestMD5Bind(digestMD5BindRequest *DigestMD5BindRequest) (*DigestMD5BindResult, error) { + if digestMD5BindRequest.Password == "" { + return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) + } + + msgCtx, err := l.doRequest(digestMD5BindRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + result := &DigestMD5BindResult{ + Controls: make([]Control, 0), + } + var params map[string]string + if len(packet.Children) == 2 { + if len(packet.Children[1].Children) == 4 { + child := packet.Children[1].Children[0] + if child.Tag != ber.TagEnumerated { + return result, GetLDAPError(packet) + } + if child.Value.(int64) != 14 { + return result, GetLDAPError(packet) + } + child = packet.Children[1].Children[3] + if child.Tag != ber.TagObjectDescriptor { + return result, GetLDAPError(packet) + } + if child.Data == nil { + return result, GetLDAPError(packet) + } + data, _ := ioutil.ReadAll(child.Data) + params, err = parseParams(string(data)) + if err != nil { + return result, fmt.Errorf("parsing digest-challenge: %s", err) + } + } + } + + if params != nil { + resp := computeResponse( + params, + "ldap/"+strings.ToLower(digestMD5BindRequest.Host), + digestMD5BindRequest.Username, + digestMD5BindRequest.Password, + ) + packet = ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + 
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication") + auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "DIGEST-MD5", "SASL Mech")) + auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, resp, "Credentials")) + request.AppendChild(auth) + packet.AppendChild(request) + msgCtx, err = l.sendMessage(packet) + if err != nil { + return nil, fmt.Errorf("send message: %s", err) + } + defer l.finishMessage(msgCtx) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, fmt.Errorf("read packet: %s", err) + } + } + + err = GetLDAPError(packet) + return result, err +} + +func parseParams(str string) (map[string]string, error) { + m := make(map[string]string) + var key, value string + var state int + for i := 0; i <= len(str); i++ { + switch state { + case 0: //reading key + if i == len(str) { + return nil, fmt.Errorf("syntax error on %d", i) + } + if str[i] != '=' { + key += string(str[i]) + continue + } + state = 1 + case 1: //reading value + if i == len(str) { + m[key] = value + break + } + switch str[i] { + case ',': + m[key] = value + state = 0 + key = "" + value = "" + case '"': + if value != "" { + return nil, fmt.Errorf("syntax error on %d", i) + } + state = 2 + default: + value += string(str[i]) + } + case 2: //inside quotes + if i == len(str) { + return nil, fmt.Errorf("syntax error on %d", i) + } + if str[i] != '"' { + value += string(str[i]) + } else { + state = 1 + } + } + } + return m, nil +} + +func computeResponse(params map[string]string, uri, username, password string) string { + nc := "00000001" + qop := "auth" + cnonce := enchex.EncodeToString(randomBytes(16)) + x := username + ":" + params["realm"] + ":" + password + y := md5Hash([]byte(x)) + + a1 := bytes.NewBuffer(y) + a1.WriteString(":" + params["nonce"] + ":" + cnonce) + if len(params["authzid"]) > 0 { + a1.WriteString(":" + params["authzid"]) + } + a2 := bytes.NewBuffer([]byte("AUTHENTICATE")) + a2.WriteString(":" + uri) + ha1 := enchex.EncodeToString(md5Hash(a1.Bytes())) + ha2 := enchex.EncodeToString(md5Hash(a2.Bytes())) + + kd := ha1 + kd += ":" + params["nonce"] + kd += ":" + nc + kd += ":" + cnonce + kd += ":" + qop + kd += ":" + ha2 + resp := enchex.EncodeToString(md5Hash([]byte(kd))) + return fmt.Sprintf( + `username="%s",realm="%s",nonce="%s",cnonce="%s",nc=00000001,qop=%s,digest-uri="%s",response=%s`, + username, + params["realm"], + params["nonce"], + cnonce, + qop, + uri, + resp, + ) +} + +func md5Hash(b []byte) []byte { + hasher := md5.New() + hasher.Write(b) + return hasher.Sum(nil) +} + +func randomBytes(len int) []byte { + b := make([]byte, len) + for i := 0; i < len; i++ { + b[i] = byte(rand.Intn(256)) + } + return b +} + +var externalBindRequest = requestFunc(func(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, 
ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication") + saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech")) + saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred")) + + pkt.AppendChild(saslAuth) + + envelope.AppendChild(pkt) + + return nil +}) + +// ExternalBind performs SASL/EXTERNAL authentication. +// +// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind. +// +// See https://tools.ietf.org/html/rfc4422#appendix-A +func (l *Conn) ExternalBind() error { + msgCtx, err := l.doRequest(externalBindRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + return GetLDAPError(packet) +} + +// NTLMBind performs an NTLMSSP bind leveraging https://github.com/Azure/go-ntlmssp + +// NTLMBindRequest represents an NTLMSSP bind operation +type NTLMBindRequest struct { + // Domain is the AD Domain to authenticate to. If not specified, it will be grabbed from the NTLMSSP Challenge + Domain string + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the credentials to bind with + Password string + // Hash is the hex NTLM hash to bind with. Password or hash must be provided + Hash string + // Controls are optional controls to send with the bind request + Controls []Control +} + +func (req *NTLMBindRequest) appendTo(envelope *ber.Packet) error { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + // generate an NTLMSSP Negotiation message for the specified domain (it can be blank) + negMessage, err := ntlmssp.NewNegotiateMessage(req.Domain, "") + if err != nil { + return fmt.Errorf("err creating negmessage: %s", err) + } + + // append the generated NTLMSSP message as a TagEnumerated BER value + auth := ber.Encode(ber.ClassContext, ber.TypePrimitive, ber.TagEnumerated, negMessage, "authentication") + request.AppendChild(auth) + envelope.AppendChild(request) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + return nil +} + +// NTLMBindResult contains the response from the server +type NTLMBindResult struct { + Controls []Control +} + +// NTLMBind performs an NTLMSSP Bind with the given domain, username and password +func (l *Conn) NTLMBind(domain, username, password string) error { + req := &NTLMBindRequest{ + Domain: domain, + Username: username, + Password: password, + } + _, err := l.NTLMChallengeBind(req) + return err +} + +// NTLMBindWithHash performs an NTLM Bind with an NTLM hash instead of plaintext password (pass-the-hash) +func (l *Conn) NTLMBindWithHash(domain, username, hash string) error { + req := &NTLMBindRequest{ + Domain: domain, + Username: username, + Hash: hash, + } + _, err := l.NTLMChallengeBind(req) + return err +} + +// NTLMChallengeBind performs
the NTLMSSP bind operation defined in the given request +func (l *Conn) NTLMChallengeBind(ntlmBindRequest *NTLMBindRequest) (*NTLMBindResult, error) { + if ntlmBindRequest.Password == "" && ntlmBindRequest.Hash == "" { + return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) + } + + msgCtx, err := l.doRequest(ntlmBindRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + result := &NTLMBindResult{ + Controls: make([]Control, 0), + } + var ntlmsspChallenge []byte + + // now find the NTLM Response Message + if len(packet.Children) == 2 { + if len(packet.Children[1].Children) == 3 { + child := packet.Children[1].Children[1] + ntlmsspChallenge = child.ByteValue + // Check to make sure we got the right message. It will always start with NTLMSSP + if len(ntlmsspChallenge) < 7 || !bytes.Equal(ntlmsspChallenge[:7], []byte("NTLMSSP")) { + return result, GetLDAPError(packet) + } + l.Debug.Printf("%d: found ntlmssp challenge", msgCtx.id) + } + } + if ntlmsspChallenge != nil { + var err error + var responseMessage []byte + // generate a response message to the challenge with the given Username/Password if password is provided + if ntlmBindRequest.Password != "" { + responseMessage, err = ntlmssp.ProcessChallenge(ntlmsspChallenge, ntlmBindRequest.Username, ntlmBindRequest.Password) + } else if ntlmBindRequest.Hash != "" { + responseMessage, err = ntlmssp.ProcessChallengeWithHash(ntlmsspChallenge, ntlmBindRequest.Username, ntlmBindRequest.Hash) + } else { + err = fmt.Errorf("need a password or hash to generate reply") + } + if err != nil { + return result, fmt.Errorf("parsing ntlm-challenge: %s", err) + } + packet = ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + // append the challenge response message as a TagEmbeddedPDV BER value + auth := ber.Encode(ber.ClassContext, ber.TypePrimitive, ber.TagEmbeddedPDV, responseMessage, "authentication") + + request.AppendChild(auth) + packet.AppendChild(request) + msgCtx, err = l.sendMessage(packet) + if err != nil { + return nil, fmt.Errorf("send message: %s", err) + } + defer l.finishMessage(msgCtx) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, fmt.Errorf("read packet: %s", err) + } + + } + + err = GetLDAPError(packet) + return result, err +} diff --git a/vendor/github.com/go-ldap/ldap/v3/client.go b/vendor/github.com/go-ldap/ldap/v3/client.go new file mode 100644 index 0000000000000..1fa4ad5aa61a7 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/client.go @@ -0,0 +1,32 @@ +package ldap + +import ( + 
"crypto/tls" + "time" +) + +// Client knows how to interact with an LDAP server +type Client interface { + Start() + StartTLS(*tls.Config) error + Close() + IsClosing() bool + SetTimeout(time.Duration) + + Bind(username, password string) error + UnauthenticatedBind(username string) error + SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error) + ExternalBind() error + + Add(*AddRequest) error + Del(*DelRequest) error + Modify(*ModifyRequest) error + ModifyDN(*ModifyDNRequest) error + ModifyWithResult(*ModifyRequest) (*ModifyResult, error) + + Compare(dn, attribute, value string) (bool, error) + PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error) + + Search(*SearchRequest) (*SearchResult, error) + SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/compare.go b/vendor/github.com/go-ldap/ldap/v3/compare.go new file mode 100644 index 0000000000000..cd43e4c53d60c --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/compare.go @@ -0,0 +1,61 @@ +package ldap + +import ( + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// CompareRequest represents an LDAP CompareRequest operation. +type CompareRequest struct { + DN string + Attribute string + Value string +} + +func (req *CompareRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + + ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion") + ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc")) + ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue")) + + pkt.AppendChild(ava) + + envelope.AppendChild(pkt) + + return nil +} + +// Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise +// false with any error that occurs if any. 
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) { + msgCtx, err := l.doRequest(&CompareRequest{ + DN: dn, + Attribute: attribute, + Value: value}) + if err != nil { + return false, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return false, err + } + + if packet.Children[1].Tag == ApplicationCompareResponse { + err := GetLDAPError(packet) + + switch { + case IsErrorWithCode(err, LDAPResultCompareTrue): + return true, nil + case IsErrorWithCode(err, LDAPResultCompareFalse): + return false, nil + default: + return false, err + } + } + return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/conn.go b/vendor/github.com/go-ldap/ldap/v3/conn.go new file mode 100644 index 0000000000000..6ed7b5e6262cb --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/conn.go @@ -0,0 +1,580 @@ +package ldap + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "sync" + "sync/atomic" + "time" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +const ( + // MessageQuit causes the processMessages loop to exit + MessageQuit = 0 + // MessageRequest sends a request to the server + MessageRequest = 1 + // MessageResponse receives a response from the server + MessageResponse = 2 + // MessageFinish indicates the client considers a particular message ID to be finished + MessageFinish = 3 + // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached + MessageTimeout = 4 +) + +const ( + // DefaultLdapPort default ldap port for pure TCP connection + DefaultLdapPort = "389" + // DefaultLdapsPort default ldap port for SSL connection + DefaultLdapsPort = "636" +) + +// PacketResponse contains the packet or error encountered reading a response +type PacketResponse struct { + // Packet is the packet read from the server + Packet *ber.Packet + // Error is an error encountered while reading + Error error +} + +// ReadPacket returns the packet or an error +func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) { + if (pr == nil) || (pr.Packet == nil && pr.Error == nil) { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response")) + } + return pr.Packet, pr.Error +} + +type messageContext struct { + id int64 + // close(done) should only be called from finishMessage() + done chan struct{} + // close(responses) should only be called from processMessages(), and only sent to from sendResponse() + responses chan *PacketResponse +} + +// sendResponse should only be called within the processMessages() loop which +// is also responsible for closing the responses channel. +func (msgCtx *messageContext) sendResponse(packet *PacketResponse) { + select { + case msgCtx.responses <- packet: + // Successfully sent packet to message handler. + case <-msgCtx.done: + // The request handler is done and will not receive more + // packets. + } +} + +type messagePacket struct { + Op int + MessageID int64 + Packet *ber.Packet + Context *messageContext +} + +type sendMessageFlags uint + +const ( + startTLS sendMessageFlags = 1 << iota +) + +// Conn represents an LDAP Connection +type Conn struct { + // requestTimeout is loaded atomically + // so we need to ensure 64-bit alignment on 32-bit platforms. 
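+ // Keeping this field first guarantees that alignment, since the first word
+ // of an allocated struct is always 64-bit aligned (see the sync/atomic docs).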
+	requestTimeout      int64
+	conn                net.Conn
+	isTLS               bool
+	closing             uint32
+	closeErr            atomic.Value
+	isStartingTLS       bool
+	Debug               debugging
+	chanConfirm         chan struct{}
+	messageContexts     map[int64]*messageContext
+	chanMessage         chan *messagePacket
+	chanMessageID       chan int64
+	wgClose             sync.WaitGroup
+	outstandingRequests uint
+	messageMutex        sync.Mutex
+}
+
+var _ Client = &Conn{}
+
+// DefaultTimeout is a package-level variable that sets the timeout value
+// used for the Dial and DialTLS methods.
+//
+// WARNING: since this is a package-level variable, setting this value from
+// multiple places will probably result in undesired behaviour.
+var DefaultTimeout = 60 * time.Second
+
+// DialOpt configures DialContext.
+type DialOpt func(*DialContext)
+
+// DialWithDialer updates net.Dialer in DialContext.
+func DialWithDialer(d *net.Dialer) DialOpt {
+	return func(dc *DialContext) {
+		dc.d = d
+	}
+}
+
+// DialWithTLSConfig updates tls.Config in DialContext.
+func DialWithTLSConfig(tc *tls.Config) DialOpt {
+	return func(dc *DialContext) {
+		dc.tc = tc
+	}
+}
+
+// DialWithTLSDialer is a wrapper for DialWithTLSConfig that also lets the caller
+// supply a net.Dialer, for example to define a timeout or a custom resolver.
+func DialWithTLSDialer(tlsConfig *tls.Config, dialer *net.Dialer) DialOpt {
+	return func(dc *DialContext) {
+		dc.tc = tlsConfig
+		dc.d = dialer
+	}
+}
+
+// DialContext contains necessary parameters to dial the given ldap URL.
+type DialContext struct {
+	d  *net.Dialer
+	tc *tls.Config
+}
+
+func (dc *DialContext) dial(u *url.URL) (net.Conn, error) {
+	if u.Scheme == "ldapi" {
+		if u.Path == "" || u.Path == "/" {
+			u.Path = "/var/run/slapd/ldapi"
+		}
+		return dc.d.Dial("unix", u.Path)
+	}
+
+	host, port, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		// assume the error is due to a missing port
+		host = u.Host
+		port = ""
+	}
+
+	switch u.Scheme {
+	case "ldap":
+		if port == "" {
+			port = DefaultLdapPort
+		}
+		return dc.d.Dial("tcp", net.JoinHostPort(host, port))
+	case "ldaps":
+		if port == "" {
+			port = DefaultLdapsPort
+		}
+		return tls.DialWithDialer(dc.d, "tcp", net.JoinHostPort(host, port), dc.tc)
+	}
+
+	return nil, fmt.Errorf("unknown scheme '%s'", u.Scheme)
+}
+
+// Dial connects to the given address on the given network using net.Dial
+// and then returns a new Conn for the connection.
+//
+// Deprecated: Use DialURL instead.
+func Dial(network, addr string) (*Conn, error) {
+	c, err := net.DialTimeout(network, addr, DefaultTimeout)
+	if err != nil {
+		return nil, NewError(ErrorNetwork, err)
+	}
+	conn := NewConn(c, false)
+	conn.Start()
+	return conn, nil
+}
+
+// DialTLS connects to the given address on the given network using tls.Dial
+// and then returns a new Conn for the connection.
+//
+// Deprecated: Use DialURL instead.
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
+	c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config)
+	if err != nil {
+		return nil, NewError(ErrorNetwork, err)
+	}
+	conn := NewConn(c, true)
+	conn.Start()
+	return conn, nil
+}
+
+// DialURL connects to the given ldap URL.
+// The following schemes are supported: ldap://, ldaps://, ldapi://.
+// On success a new Conn for the connection is returned.
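+//
+// A usage sketch with explicit options (the endpoint and TLS settings are
+// hypothetical):
+//
+//	conn, err := DialURL(
+//		"ldaps://ldap.example.com",
+//		DialWithDialer(&net.Dialer{Timeout: 10 * time.Second}),
+//		DialWithTLSConfig(&tls.Config{ServerName: "ldap.example.com"}),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer conn.Close()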
+func DialURL(addr string, opts ...DialOpt) (*Conn, error) {
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, NewError(ErrorNetwork, err)
+	}
+
+	var dc DialContext
+	for _, opt := range opts {
+		opt(&dc)
+	}
+	if dc.d == nil {
+		dc.d = &net.Dialer{Timeout: DefaultTimeout}
+	}
+
+	c, err := dc.dial(u)
+	if err != nil {
+		return nil, NewError(ErrorNetwork, err)
+	}
+
+	conn := NewConn(c, u.Scheme == "ldaps")
+	conn.Start()
+	return conn, nil
+}
+
+// NewConn returns a new Conn using conn for network I/O.
+func NewConn(conn net.Conn, isTLS bool) *Conn {
+	return &Conn{
+		conn:            conn,
+		chanConfirm:     make(chan struct{}),
+		chanMessageID:   make(chan int64),
+		chanMessage:     make(chan *messagePacket, 10),
+		messageContexts: map[int64]*messageContext{},
+		requestTimeout:  0,
+		isTLS:           isTLS,
+	}
+}
+
+// Start initializes goroutines to read responses and process messages
+func (l *Conn) Start() {
+	l.wgClose.Add(1)
+	go l.reader()
+	go l.processMessages()
+}
+
+// IsClosing returns whether or not we're currently closing.
+func (l *Conn) IsClosing() bool {
+	return atomic.LoadUint32(&l.closing) == 1
+}
+
+// setClosing sets the closing value to true
+func (l *Conn) setClosing() bool {
+	return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
+}
+
+// Close closes the connection.
+func (l *Conn) Close() {
+	l.messageMutex.Lock()
+	defer l.messageMutex.Unlock()
+
+	if l.setClosing() {
+		l.Debug.Printf("Sending quit message and waiting for confirmation")
+		l.chanMessage <- &messagePacket{Op: MessageQuit}
+		<-l.chanConfirm
+		close(l.chanMessage)
+
+		l.Debug.Printf("Closing network connection")
+		if err := l.conn.Close(); err != nil {
+			logger.Println(err)
+		}
+
+		l.wgClose.Done()
+	}
+	l.wgClose.Wait()
+}
+
+// SetTimeout sets the maximum time to wait for a response after a request is
+// sent; once it elapses, the pending request fails with a timeout error.
+func (l *Conn) SetTimeout(timeout time.Duration) {
+	if timeout > 0 {
+		atomic.StoreInt64(&l.requestTimeout, int64(timeout))
+	}
+}
+
+// nextMessageID returns the next available messageID
+func (l *Conn) nextMessageID() int64 {
+	if messageID, ok := <-l.chanMessageID; ok {
+		return messageID
+	}
+	return 0
+}
+
+// StartTLS sends the command to start a TLS session and then upgrades the
+// connection to a TLS client.
+func (l *Conn) StartTLS(config *tls.Config) error {
+	if l.isTLS {
+		return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
+	}
+
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
+	request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
+	packet.AppendChild(request)
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
+	if err != nil {
+		return err
+	}
+	defer l.finishMessage(msgCtx)
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			l.Close()
+			return err
+		}
+		l.Debug.PrintPacket(packet)
+	}
+
+	if err := GetLDAPError(packet); err == nil {
+		conn := tls.Client(l.conn, config)
+
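+		// Force the handshake eagerly so that certificate or protocol errors
+		// surface here rather than on the first subsequent read or write.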
if connErr := conn.Handshake(); connErr != nil {
+			l.Close()
+			return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr))
+		}
+
+		l.isTLS = true
+		l.conn = conn
+	} else {
+		return err
+	}
+	go l.reader()
+
+	return nil
+}
+
+// TLSConnectionState returns the client's TLS connection state.
+// The return values are their zero values if StartTLS did
+// not succeed.
+func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
+	tc, ok := l.conn.(*tls.Conn)
+	if !ok {
+		return
+	}
+	return tc.ConnectionState(), true
+}
+
+func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
+	return l.sendMessageWithFlags(packet, 0)
+}
+
+func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
+	if l.IsClosing() {
+		return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+	}
+	l.messageMutex.Lock()
+	l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
+	if l.isStartingTLS {
+		l.messageMutex.Unlock()
+		return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startTLS phase"))
+	}
+	if flags&startTLS != 0 {
+		if l.outstandingRequests != 0 {
+			l.messageMutex.Unlock()
+			return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
+		}
+		l.isStartingTLS = true
+	}
+	l.outstandingRequests++
+
+	l.messageMutex.Unlock()
+
+	responses := make(chan *PacketResponse)
+	messageID := packet.Children[0].Value.(int64)
+	message := &messagePacket{
+		Op:        MessageRequest,
+		MessageID: messageID,
+		Packet:    packet,
+		Context: &messageContext{
+			id:        messageID,
+			done:      make(chan struct{}),
+			responses: responses,
+		},
+	}
+	if !l.sendProcessMessage(message) {
+		if l.IsClosing() {
+			return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+		}
+		return nil, NewError(ErrorNetwork, errors.New("ldap: could not send message for unknown reason"))
+	}
+	return message.Context, nil
+}
+
+func (l *Conn) finishMessage(msgCtx *messageContext) {
+	close(msgCtx.done)
+
+	if l.IsClosing() {
+		return
+	}
+
+	l.messageMutex.Lock()
+	l.outstandingRequests--
+	if l.isStartingTLS {
+		l.isStartingTLS = false
+	}
+	l.messageMutex.Unlock()
+
+	message := &messagePacket{
+		Op:        MessageFinish,
+		MessageID: msgCtx.id,
+	}
+	l.sendProcessMessage(message)
+}
+
+func (l *Conn) sendProcessMessage(message *messagePacket) bool {
+	l.messageMutex.Lock()
+	defer l.messageMutex.Unlock()
+	if l.IsClosing() {
+		return false
+	}
+	l.chanMessage <- message
+	return true
+}
+
+func (l *Conn) processMessages() {
+	defer func() {
+		if err := recover(); err != nil {
+			logger.Printf("ldap: recovered panic in processMessages: %v", err)
+		}
+		for messageID, msgCtx := range l.messageContexts {
+			// If we are closing due to an error, inform anyone who
+			// is waiting about the error.
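+			// (closeErr is populated by the reader goroutine when it fails
+			// with a network error; see reader() below.)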
+ if l.IsClosing() && l.closeErr.Load() != nil { + msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}) + } + l.Debug.Printf("Closing channel for MessageID %d", messageID) + close(msgCtx.responses) + delete(l.messageContexts, messageID) + } + close(l.chanMessageID) + close(l.chanConfirm) + }() + + var messageID int64 = 1 + for { + select { + case l.chanMessageID <- messageID: + messageID++ + case message := <-l.chanMessage: + switch message.Op { + case MessageQuit: + l.Debug.Printf("Shutting down - quit message received") + return + case MessageRequest: + // Add to message list and write to network + l.Debug.Printf("Sending message %d", message.MessageID) + + buf := message.Packet.Bytes() + _, err := l.conn.Write(buf) + if err != nil { + l.Debug.Printf("Error Sending Message: %s", err.Error()) + message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}) + close(message.Context.responses) + break + } + + // Only add to messageContexts if we were able to + // successfully write the message. + l.messageContexts[message.MessageID] = message.Context + + // Add timeout if defined + requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout)) + if requestTimeout > 0 { + go func() { + defer func() { + if err := recover(); err != nil { + logger.Printf("ldap: recovered panic in RequestTimeout: %v", err) + } + }() + time.Sleep(requestTimeout) + timeoutMessage := &messagePacket{ + Op: MessageTimeout, + MessageID: message.MessageID, + } + l.sendProcessMessage(timeoutMessage) + }() + } + case MessageResponse: + l.Debug.Printf("Receiving message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + msgCtx.sendResponse(&PacketResponse{message.Packet, nil}) + } else { + logger.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing()) + l.Debug.PrintPacket(message.Packet) + } + case MessageTimeout: + // Handle the timeout by closing the channel + // All reads will return immediately + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + l.Debug.Printf("Receiving message timeout for %d", message.MessageID) + msgCtx.sendResponse(&PacketResponse{message.Packet, NewError(ErrorNetwork, errors.New("ldap: connection timed out"))}) + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + case MessageFinish: + l.Debug.Printf("Finished message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + } + } + } +} + +func (l *Conn) reader() { + cleanstop := false + defer func() { + if err := recover(); err != nil { + logger.Printf("ldap: recovered panic in reader: %v", err) + } + if !cleanstop { + l.Close() + } + }() + + bufConn := bufio.NewReader(l.conn) + for { + if cleanstop { + l.Debug.Printf("reader clean stopping (without closing the connection)") + return + } + packet, err := ber.ReadPacket(bufConn) + if err != nil { + // A read error is expected here if we are closing the connection... 
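+			// Otherwise, record the error so that processMessages can relay
+			// it to any outstanding requests while tearing down.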
+ if !l.IsClosing() { + l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err)) + l.Debug.Printf("reader error: %s", err) + } + return + } + if err := addLDAPDescriptions(packet); err != nil { + l.Debug.Printf("descriptions error: %s", err) + } + if len(packet.Children) == 0 { + l.Debug.Printf("Received bad ldap packet") + continue + } + l.messageMutex.Lock() + if l.isStartingTLS { + cleanstop = true + } + l.messageMutex.Unlock() + message := &messagePacket{ + Op: MessageResponse, + MessageID: packet.Children[0].Value.(int64), + Packet: packet, + } + if !l.sendProcessMessage(message) { + return + } + } +} diff --git a/vendor/github.com/go-ldap/ldap/v3/control.go b/vendor/github.com/go-ldap/ldap/v3/control.go new file mode 100644 index 0000000000000..64fb002ad990f --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/control.go @@ -0,0 +1,528 @@ +package ldap + +import ( + "fmt" + "strconv" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +const ( + // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt + ControlTypePaging = "1.2.840.113556.1.4.319" + // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 + ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1" + // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4" + // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5" + // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296 + ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2" + // ControlTypeWhoAmI - https://tools.ietf.org/html/rfc4532 + ControlTypeWhoAmI = "1.3.6.1.4.1.4203.1.11.3" + + // ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx + ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528" + // ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx + ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417" + // ControlTypeMicrosoftServerLinkTTL - https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-adts/f4f523a8-abc0-4b3a-a471-6b2fef135481?redirectedfrom=MSDN + ControlTypeMicrosoftServerLinkTTL = "1.2.840.113556.1.4.2309" +) + +// ControlTypeMap maps controls to text descriptions +var ControlTypeMap = map[string]string{ + ControlTypePaging: "Paging", + ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft", + ControlTypeManageDsaIT: "Manage DSA IT", + ControlTypeMicrosoftNotification: "Change Notification - Microsoft", + ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft", + ControlTypeMicrosoftServerLinkTTL: "Return TTL-DNs for link values with associated expiry times - Microsoft", +} + +// Control defines an interface controls provide to encode and describe themselves +type Control interface { + // GetControlType returns the OID + GetControlType() string + // Encode returns the ber packet representation + Encode() *ber.Packet + // String returns a human-readable description + String() string +} + +// ControlString implements the Control interface for simple controls +type ControlString struct { + ControlType string + Criticality bool + ControlValue string +} + +// GetControlType returns the OID +func (c *ControlString) GetControlType() string { + return c.ControlType +} + +// Encode returns the ber packet representation +func (c *ControlString) Encode() 
*ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + if c.ControlValue != "" { + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlString) String() string { + return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue) +} + +// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt +type ControlPaging struct { + // PagingSize indicates the page size + PagingSize uint32 + // Cookie is an opaque value returned by the server to track a paging cursor + Cookie []byte +} + +// GetControlType returns the OID +func (c *ControlPaging) GetControlType() string { + return ControlTypePaging +} + +// Encode returns the ber packet representation +func (c *ControlPaging) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")")) + + p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)") + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value") + seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size")) + cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie") + cookie.Value = c.Cookie + cookie.Data.Write(c.Cookie) + seq.AppendChild(cookie) + p2.AppendChild(seq) + + packet.AppendChild(p2) + return packet +} + +// String returns a human-readable description +func (c *ControlPaging) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q", + ControlTypeMap[ControlTypePaging], + ControlTypePaging, + false, + c.PagingSize, + c.Cookie) +} + +// SetCookie stores the given cookie in the paging control +func (c *ControlPaging) SetCookie(cookie []byte) { + c.Cookie = cookie +} + +// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 +type ControlBeheraPasswordPolicy struct { + // Expire contains the number of seconds before a password will expire + Expire int64 + // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password + Grace int64 + // Error indicates the error code + Error int8 + // ErrorString is a human readable error + ErrorString string +} + +// GetControlType returns the OID +func (c *ControlBeheraPasswordPolicy) GetControlType() string { + return ControlTypeBeheraPasswordPolicy +} + +// Encode returns the ber packet representation +func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + 
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlBeheraPasswordPolicy) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s", + ControlTypeMap[ControlTypeBeheraPasswordPolicy], + ControlTypeBeheraPasswordPolicy, + false, + c.Expire, + c.Grace, + c.Error, + c.ErrorString) +} + +// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordMustChange struct { + // MustChange indicates if the password is required to be changed + MustChange bool +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordMustChange) GetControlType() string { + return ControlTypeVChuPasswordMustChange +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordMustChange) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t MustChange: %v", + ControlTypeMap[ControlTypeVChuPasswordMustChange], + ControlTypeVChuPasswordMustChange, + false, + c.MustChange) +} + +// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordWarning struct { + // Expire indicates the time in seconds until the password expires + Expire int64 +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordWarning) GetControlType() string { + return ControlTypeVChuPasswordWarning +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordWarning) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordWarning) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %b", + ControlTypeMap[ControlTypeVChuPasswordWarning], + ControlTypeVChuPasswordWarning, + false, + c.Expire) +} + +// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296 +type ControlManageDsaIT struct { + // Criticality indicates if this control is required + Criticality bool +} + +// GetControlType returns the OID +func (c *ControlManageDsaIT) GetControlType() string { + return ControlTypeManageDsaIT +} + +// Encode returns the ber packet representation +func (c *ControlManageDsaIT) Encode() *ber.Packet { + //FIXME + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlManageDsaIT) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t", + ControlTypeMap[ControlTypeManageDsaIT], + ControlTypeManageDsaIT, + c.Criticality) +} + +// NewControlManageDsaIT returns a ControlManageDsaIT control +func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT { + return 
&ControlManageDsaIT{Criticality: Criticality} +} + +// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx +type ControlMicrosoftNotification struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftNotification) GetControlType() string { + return ControlTypeMicrosoftNotification +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftNotification) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftNotification) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftNotification], + ControlTypeMicrosoftNotification) +} + +// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control +func NewControlMicrosoftNotification() *ControlMicrosoftNotification { + return &ControlMicrosoftNotification{} +} + +// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx +type ControlMicrosoftShowDeleted struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftShowDeleted) GetControlType() string { + return ControlTypeMicrosoftShowDeleted +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftShowDeleted) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftShowDeleted], + ControlTypeMicrosoftShowDeleted) +} + +// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control +func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted { + return &ControlMicrosoftShowDeleted{} +} + +// ControlMicrosoftServerLinkTTL implements the control described in https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-adts/f4f523a8-abc0-4b3a-a471-6b2fef135481?redirectedfrom=MSDN +type ControlMicrosoftServerLinkTTL struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftServerLinkTTL) GetControlType() string { + return ControlTypeMicrosoftServerLinkTTL +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftServerLinkTTL) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftServerLinkTTL, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftServerLinkTTL]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftServerLinkTTL) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftServerLinkTTL], + ControlTypeMicrosoftServerLinkTTL) +} + +// 
NewControlMicrosoftServerLinkTTL returns a ControlMicrosoftServerLinkTTL control
+func NewControlMicrosoftServerLinkTTL() *ControlMicrosoftServerLinkTTL {
+	return &ControlMicrosoftServerLinkTTL{}
+}
+
+// FindControl returns the first control of the given type in the list, or nil
+func FindControl(controls []Control, controlType string) Control {
+	for _, c := range controls {
+		if c.GetControlType() == controlType {
+			return c
+		}
+	}
+	return nil
+}
+
+// DecodeControl returns a control read from the given packet. Unrecognized control
+// types are returned as a generic ControlString; an error is returned if the packet
+// cannot be decoded.
+func DecodeControl(packet *ber.Packet) (Control, error) {
+	var (
+		ControlType = ""
+		Criticality = false
+		value       *ber.Packet
+	)
+
+	switch len(packet.Children) {
+	case 0:
+		// at least one child is required for control type
+		return nil, fmt.Errorf("at least one child is required for control type")
+
+	case 1:
+		// just type, no criticality or value
+		ControlType = packet.Children[0].Value.(string)
+		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+	case 2:
+		ControlType = packet.Children[0].Value.(string)
+		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+		// Children[1] could be criticality or value (both are optional)
+		// duck-type on whether this is a boolean
+		if _, ok := packet.Children[1].Value.(bool); ok {
+			packet.Children[1].Description = "Criticality"
+			Criticality = packet.Children[1].Value.(bool)
+		} else {
+			packet.Children[1].Description = "Control Value"
+			value = packet.Children[1]
+		}
+
+	case 3:
+		ControlType = packet.Children[0].Value.(string)
+		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+		packet.Children[1].Description = "Criticality"
+		Criticality = packet.Children[1].Value.(bool)
+
+		packet.Children[2].Description = "Control Value"
+		value = packet.Children[2]
+
+	default:
+		// more than 3 children is invalid
+		return nil, fmt.Errorf("more than 3 children is invalid for controls")
+	}
+
+	switch ControlType {
+	case ControlTypeManageDsaIT:
+		return NewControlManageDsaIT(Criticality), nil
+	case ControlTypePaging:
+		value.Description += " (Paging)"
+		c := new(ControlPaging)
+		if value.Value != nil {
+			valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+			if err != nil {
+				return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+			}
+			value.Data.Truncate(0)
+			value.Value = nil
+			value.AppendChild(valueChildren)
+		}
+		value = value.Children[0]
+		value.Description = "Search Control Value"
+		value.Children[0].Description = "Paging Size"
+		value.Children[1].Description = "Cookie"
+		c.PagingSize = uint32(value.Children[0].Value.(int64))
+		c.Cookie = value.Children[1].Data.Bytes()
+		value.Children[1].Value = c.Cookie
+		return c, nil
+	case ControlTypeBeheraPasswordPolicy:
+		value.Description += " (Password Policy - Behera)"
+		c := NewControlBeheraPasswordPolicy()
+		if value.Value != nil {
+			valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+			if err != nil {
+				return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+			}
+			value.Data.Truncate(0)
+			value.Value = nil
+			value.AppendChild(valueChildren)
+		}
+
+		sequence := value.Children[0]
+
+		for _, child := range sequence.Children {
+			if child.Tag == 0 {
+				//Warning
+				warningPacket := child.Children[0]
+				val, err := ber.ParseInt64(warningPacket.Data.Bytes())
+				if err != nil {
+					return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+				}
+				if warningPacket.Tag == 0 {
+					//timeBeforeExpiration
+					c.Expire =
val + warningPacket.Value = c.Expire + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + c.Grace = val + warningPacket.Value = c.Grace + } + } else if child.Tag == 1 { + // Error + bs := child.Data.Bytes() + if len(bs) != 1 || bs[0] > 8 { + return nil, fmt.Errorf("failed to decode data bytes: %s", "invalid PasswordPolicyResponse enum value") + } + val := int8(bs[0]) + c.Error = val + child.Value = c.Error + c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error] + } + } + return c, nil + case ControlTypeVChuPasswordMustChange: + c := &ControlVChuPasswordMustChange{MustChange: true} + return c, nil + case ControlTypeVChuPasswordWarning: + c := &ControlVChuPasswordWarning{Expire: -1} + expireStr := ber.DecodeString(value.Data.Bytes()) + + expire, err := strconv.ParseInt(expireStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse value as int: %s", err) + } + c.Expire = expire + value.Value = c.Expire + + return c, nil + case ControlTypeMicrosoftNotification: + return NewControlMicrosoftNotification(), nil + case ControlTypeMicrosoftShowDeleted: + return NewControlMicrosoftShowDeleted(), nil + case ControlTypeMicrosoftServerLinkTTL: + return NewControlMicrosoftServerLinkTTL(), nil + default: + c := new(ControlString) + c.ControlType = ControlType + c.Criticality = Criticality + if value != nil { + c.ControlValue = value.Value.(string) + } + return c, nil + } +} + +// NewControlString returns a generic control +func NewControlString(controlType string, criticality bool, controlValue string) *ControlString { + return &ControlString{ + ControlType: controlType, + Criticality: criticality, + ControlValue: controlValue, + } +} + +// NewControlPaging returns a paging control +func NewControlPaging(pagingSize uint32) *ControlPaging { + return &ControlPaging{PagingSize: pagingSize} +} + +// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy +func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy { + return &ControlBeheraPasswordPolicy{ + Expire: -1, + Grace: -1, + Error: -1, + } +} + +func encodeControls(controls []Control) *ber.Packet { + packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls") + for _, control := range controls { + packet.AppendChild(control.Encode()) + } + return packet +} diff --git a/vendor/github.com/go-ldap/ldap/v3/debug.go b/vendor/github.com/go-ldap/ldap/v3/debug.go new file mode 100644 index 0000000000000..74cc65a43a379 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/debug.go @@ -0,0 +1,28 @@ +package ldap + +import ( + ber "github.com/go-asn1-ber/asn1-ber" +) + +// debugging type +// - has a Printf method to write the debug output +type debugging bool + +// Enable controls debugging mode. +func (debug *debugging) Enable(b bool) { + *debug = debugging(b) +} + +// Printf writes debug output. +func (debug debugging) Printf(format string, args ...interface{}) { + if debug { + logger.Printf(format, args...) + } +} + +// PrintPacket dumps a packet. 
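+//
+// Debug output is enabled per connection; a hypothetical sketch:
+//
+//	conn, err := DialURL("ldap://ldap.example.com")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer conn.Close()
+//	conn.Debug.Enable(true) // packets are now dumped via the package logger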
+func (debug debugging) PrintPacket(packet *ber.Packet) { + if debug { + ber.WritePacket(logger.Writer(), packet) + } +} diff --git a/vendor/github.com/go-ldap/ldap/v3/del.go b/vendor/github.com/go-ldap/ldap/v3/del.go new file mode 100644 index 0000000000000..bac0dfb795e64 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/del.go @@ -0,0 +1,57 @@ +package ldap + +import ( + ber "github.com/go-asn1-ber/asn1-ber" +) + +// DelRequest implements an LDAP deletion request +type DelRequest struct { + // DN is the name of the directory entry to delete + DN string + // Controls hold optional controls to send with the request + Controls []Control +} + +func (req *DelRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request") + pkt.Data.Write([]byte(req.DN)) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// NewDelRequest creates a delete request for the given DN and controls +func NewDelRequest(DN string, Controls []Control) *DelRequest { + return &DelRequest{ + DN: DN, + Controls: Controls, + } +} + +// Del executes the given delete request +func (l *Conn) Del(delRequest *DelRequest) error { + msgCtx, err := l.doRequest(delRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationDelResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + logger.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/dn.go b/vendor/github.com/go-ldap/ldap/v3/dn.go new file mode 100644 index 0000000000000..d802580e58928 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/dn.go @@ -0,0 +1,270 @@ +package ldap + +import ( + "bytes" + enchex "encoding/hex" + "errors" + "fmt" + "strings" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 +type AttributeTypeAndValue struct { + // Type is the attribute type + Type string + // Value is the attribute value + Value string +} + +// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514 +type RelativeDN struct { + Attributes []*AttributeTypeAndValue +} + +// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514 +type DN struct { + RDNs []*RelativeDN +} + +// ParseDN returns a distinguishedName or an error. 
+// The function respects https://tools.ietf.org/html/rfc4514 +func ParseDN(str string) (*DN, error) { + dn := new(DN) + dn.RDNs = make([]*RelativeDN, 0) + rdn := new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + buffer := bytes.Buffer{} + attribute := new(AttributeTypeAndValue) + escaping := false + + unescapedTrailingSpaces := 0 + stringFromBuffer := func() string { + s := buffer.String() + s = s[0 : len(s)-unescapedTrailingSpaces] + buffer.Reset() + unescapedTrailingSpaces = 0 + return s + } + + for i := 0; i < len(str); i++ { + char := str[i] + switch { + case escaping: + unescapedTrailingSpaces = 0 + escaping = false + switch char { + case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\': + buffer.WriteByte(char) + continue + } + // Not a special character, assume hex encoded octet + if len(str) == i+1 { + return nil, errors.New("got corrupted escaped character") + } + + dst := []byte{0} + n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2])) + if err != nil { + return nil, fmt.Errorf("failed to decode escaped character: %s", err) + } else if n != 1 { + return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n) + } + buffer.WriteByte(dst[0]) + i++ + case char == '\\': + unescapedTrailingSpaces = 0 + escaping = true + case char == '=': + attribute.Type = stringFromBuffer() + // Special case: If the first character in the value is # the + // following data is BER encoded so we can just fast forward + // and decode. + if len(str) > i+1 && str[i+1] == '#' { + i += 2 + index := strings.IndexAny(str[i:], ",+") + data := str + if index > 0 { + data = str[i : i+index] + } else { + data = str[i:] + } + rawBER, err := enchex.DecodeString(data) + if err != nil { + return nil, fmt.Errorf("failed to decode BER encoding: %s", err) + } + packet, err := ber.DecodePacketErr(rawBER) + if err != nil { + return nil, fmt.Errorf("failed to decode BER packet: %s", err) + } + buffer.WriteString(packet.Data.String()) + i += len(data) - 1 + } + case char == ',' || char == '+' || char == ';': + // We're done with this RDN or value, push it + if len(attribute.Type) == 0 { + return nil, errors.New("incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + attribute = new(AttributeTypeAndValue) + if char == ',' || char == ';' { + dn.RDNs = append(dn.RDNs, rdn) + rdn = new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + } + case char == ' ' && buffer.Len() == 0: + // ignore unescaped leading spaces + continue + default: + if char == ' ' { + // Track unescaped spaces in case they are trailing and we need to remove them + unescapedTrailingSpaces++ + } else { + // Reset if we see a non-space char + unescapedTrailingSpaces = 0 + } + buffer.WriteByte(char) + } + } + if buffer.Len() > 0 { + if len(attribute.Type) == 0 { + return nil, errors.New("DN ended with incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + dn.RDNs = append(dn.RDNs, rdn) + } + return dn, nil +} + +// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Returns true if they have the same number of relative distinguished names +// and corresponding relative distinguished names (by position) are the same. 
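+//
+// For example (arbitrary sample DNs):
+//
+//	a, _ := ParseDN("ou=widgets,o=acme.com")
+//	b, _ := ParseDN("OU=widgets,o=acme.com")
+//	a.Equal(b) // true: attribute types compare case-insensitively
+//
+//	c, _ := ParseDN("ou=sprockets,ou=widgets,o=acme.com")
+//	a.Equal(c)      // false: different number of RDNs
+//	a.AncestorOf(c) // true: c ends with all of a's RDNs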
+func (d *DN) Equal(other *DN) bool {
+	if len(d.RDNs) != len(other.RDNs) {
+		return false
+	}
+	for i := range d.RDNs {
+		if !d.RDNs[i].Equal(other.RDNs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
+func (d *DN) AncestorOf(other *DN) bool {
+	if len(d.RDNs) >= len(other.RDNs) {
+		return false
+	}
+	// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+	otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+	for i := range d.RDNs {
+		if !d.RDNs[i].Equal(otherRDNs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
+// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
+// The order of attributes is not significant.
+// Case of attribute types is not significant.
+func (r *RelativeDN) Equal(other *RelativeDN) bool {
+	if len(r.Attributes) != len(other.Attributes) {
+		return false
+	}
+	return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
+}
+
+func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
+	for _, attr := range attrs {
+		found := false
+		for _, myattr := range r.Attributes {
+			if myattr.Equal(attr) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
+// Case of the attribute type is not significant
+func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
+	return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
+}
+
+// EqualFold returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Returns true if they have the same number of relative distinguished names
+// and corresponding relative distinguished names (by position) are the same.
+// Case of the attribute type and value is not significant
+func (d *DN) EqualFold(other *DN) bool {
+	if len(d.RDNs) != len(other.RDNs) {
+		return false
+	}
+	for i := range d.RDNs {
+		if !d.RDNs[i].EqualFold(other.RDNs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// AncestorOfFold returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// Case of the attribute type and value is not significant
+func (d *DN) AncestorOfFold(other *DN) bool {
+	if len(d.RDNs) >= len(other.RDNs) {
+		return false
+	}
+	// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+	otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+	for i := range d.RDNs {
+		if !d.RDNs[i].EqualFold(otherRDNs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// EqualFold returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Case of the attribute type is not significant +func (r *RelativeDN) EqualFold(other *RelativeDN) bool { + if len(r.Attributes) != len(other.Attributes) { + return false + } + return r.hasAllAttributesFold(other.Attributes) && other.hasAllAttributesFold(r.Attributes) +} + +func (r *RelativeDN) hasAllAttributesFold(attrs []*AttributeTypeAndValue) bool { + for _, attr := range attrs { + found := false + for _, myattr := range r.Attributes { + if myattr.EqualFold(attr) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// EqualFold returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue +// Case of the attribute type and value is not significant +func (a *AttributeTypeAndValue) EqualFold(other *AttributeTypeAndValue) bool { + return strings.EqualFold(a.Type, other.Type) && strings.EqualFold(a.Value, other.Value) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/doc.go b/vendor/github.com/go-ldap/ldap/v3/doc.go new file mode 100644 index 0000000000000..f20d39bc99f95 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/doc.go @@ -0,0 +1,4 @@ +/* +Package ldap provides basic LDAP v3 functionality. +*/ +package ldap diff --git a/vendor/github.com/go-ldap/ldap/v3/error.go b/vendor/github.com/go-ldap/ldap/v3/error.go new file mode 100644 index 0000000000000..3cdb7b318c442 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/error.go @@ -0,0 +1,253 @@ +package ldap + +import ( + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// LDAP Result Codes +const ( + LDAPResultSuccess = 0 + LDAPResultOperationsError = 1 + LDAPResultProtocolError = 2 + LDAPResultTimeLimitExceeded = 3 + LDAPResultSizeLimitExceeded = 4 + LDAPResultCompareFalse = 5 + LDAPResultCompareTrue = 6 + LDAPResultAuthMethodNotSupported = 7 + LDAPResultStrongAuthRequired = 8 + LDAPResultReferral = 10 + LDAPResultAdminLimitExceeded = 11 + LDAPResultUnavailableCriticalExtension = 12 + LDAPResultConfidentialityRequired = 13 + LDAPResultSaslBindInProgress = 14 + LDAPResultNoSuchAttribute = 16 + LDAPResultUndefinedAttributeType = 17 + LDAPResultInappropriateMatching = 18 + LDAPResultConstraintViolation = 19 + LDAPResultAttributeOrValueExists = 20 + LDAPResultInvalidAttributeSyntax = 21 + LDAPResultNoSuchObject = 32 + LDAPResultAliasProblem = 33 + LDAPResultInvalidDNSyntax = 34 + LDAPResultIsLeaf = 35 + LDAPResultAliasDereferencingProblem = 36 + LDAPResultInappropriateAuthentication = 48 + LDAPResultInvalidCredentials = 49 + LDAPResultInsufficientAccessRights = 50 + LDAPResultBusy = 51 + LDAPResultUnavailable = 52 + LDAPResultUnwillingToPerform = 53 + LDAPResultLoopDetect = 54 + LDAPResultSortControlMissing = 60 + LDAPResultOffsetRangeError = 61 + LDAPResultNamingViolation = 64 + LDAPResultObjectClassViolation = 65 + LDAPResultNotAllowedOnNonLeaf = 66 + LDAPResultNotAllowedOnRDN = 67 + LDAPResultEntryAlreadyExists = 68 + LDAPResultObjectClassModsProhibited = 69 + LDAPResultResultsTooLarge = 70 + LDAPResultAffectsMultipleDSAs = 71 + LDAPResultVirtualListViewErrorOrControlError = 76 + LDAPResultOther = 80 + LDAPResultServerDown = 81 + LDAPResultLocalError = 82 + LDAPResultEncodingError = 83 + LDAPResultDecodingError = 84 + LDAPResultTimeout = 85 + LDAPResultAuthUnknown = 86 + LDAPResultFilterError = 87 + LDAPResultUserCanceled = 88 + LDAPResultParamError = 89 + LDAPResultNoMemory = 90 + LDAPResultConnectError = 91 + LDAPResultNotSupported = 92 + LDAPResultControlNotFound = 93 + LDAPResultNoResultsReturned = 94 + LDAPResultMoreResultsToReturn = 95 + 
LDAPResultClientLoop = 96 + LDAPResultReferralLimitExceeded = 97 + LDAPResultInvalidResponse = 100 + LDAPResultAmbiguousResponse = 101 + LDAPResultTLSNotSupported = 112 + LDAPResultIntermediateResponse = 113 + LDAPResultUnknownType = 114 + LDAPResultCanceled = 118 + LDAPResultNoSuchOperation = 119 + LDAPResultTooLate = 120 + LDAPResultCannotCancel = 121 + LDAPResultAssertionFailed = 122 + LDAPResultAuthorizationDenied = 123 + LDAPResultSyncRefreshRequired = 4096 + + ErrorNetwork = 200 + ErrorFilterCompile = 201 + ErrorFilterDecompile = 202 + ErrorDebugging = 203 + ErrorUnexpectedMessage = 204 + ErrorUnexpectedResponse = 205 + ErrorEmptyPassword = 206 +) + +// LDAPResultCodeMap contains string descriptions for LDAP error codes +var LDAPResultCodeMap = map[uint16]string{ + LDAPResultSuccess: "Success", + LDAPResultOperationsError: "Operations Error", + LDAPResultProtocolError: "Protocol Error", + LDAPResultTimeLimitExceeded: "Time Limit Exceeded", + LDAPResultSizeLimitExceeded: "Size Limit Exceeded", + LDAPResultCompareFalse: "Compare False", + LDAPResultCompareTrue: "Compare True", + LDAPResultAuthMethodNotSupported: "Auth Method Not Supported", + LDAPResultStrongAuthRequired: "Strong Auth Required", + LDAPResultReferral: "Referral", + LDAPResultAdminLimitExceeded: "Admin Limit Exceeded", + LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension", + LDAPResultConfidentialityRequired: "Confidentiality Required", + LDAPResultSaslBindInProgress: "Sasl Bind In Progress", + LDAPResultNoSuchAttribute: "No Such Attribute", + LDAPResultUndefinedAttributeType: "Undefined Attribute Type", + LDAPResultInappropriateMatching: "Inappropriate Matching", + LDAPResultConstraintViolation: "Constraint Violation", + LDAPResultAttributeOrValueExists: "Attribute Or Value Exists", + LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax", + LDAPResultNoSuchObject: "No Such Object", + LDAPResultAliasProblem: "Alias Problem", + LDAPResultInvalidDNSyntax: "Invalid DN Syntax", + LDAPResultIsLeaf: "Is Leaf", + LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem", + LDAPResultInappropriateAuthentication: "Inappropriate Authentication", + LDAPResultInvalidCredentials: "Invalid Credentials", + LDAPResultInsufficientAccessRights: "Insufficient Access Rights", + LDAPResultBusy: "Busy", + LDAPResultUnavailable: "Unavailable", + LDAPResultUnwillingToPerform: "Unwilling To Perform", + LDAPResultLoopDetect: "Loop Detect", + LDAPResultSortControlMissing: "Sort Control Missing", + LDAPResultOffsetRangeError: "Result Offset Range Error", + LDAPResultNamingViolation: "Naming Violation", + LDAPResultObjectClassViolation: "Object Class Violation", + LDAPResultResultsTooLarge: "Results Too Large", + LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf", + LDAPResultNotAllowedOnRDN: "Not Allowed On RDN", + LDAPResultEntryAlreadyExists: "Entry Already Exists", + LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited", + LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs", + LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view", + LDAPResultOther: "Other", + LDAPResultServerDown: "Cannot establish a connection", + LDAPResultLocalError: "An error occurred", + LDAPResultEncodingError: "LDAP encountered an error while encoding", + LDAPResultDecodingError: "LDAP encountered an error while decoding", + LDAPResultTimeout: "LDAP timeout while waiting for a response from the server", + LDAPResultAuthUnknown: "The auth method requested in 
a bind request is unknown",
+	LDAPResultFilterError:           "An error occurred while encoding the given search filter",
+	LDAPResultUserCanceled:          "The user canceled the operation",
+	LDAPResultParamError:            "An invalid parameter was specified",
+	LDAPResultNoMemory:              "Out of memory error",
+	LDAPResultConnectError:          "A connection to the server could not be established",
+	LDAPResultNotSupported:          "An attempt has been made to use a feature not supported by LDAP",
+	LDAPResultControlNotFound:       "The controls required to perform the requested operation were not found",
+	LDAPResultNoResultsReturned:     "No results were returned from the server",
+	LDAPResultMoreResultsToReturn:   "There are more results in the chain of results",
+	LDAPResultClientLoop:            "A loop has been detected. For example when following referrals",
+	LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded",
+	LDAPResultCanceled:              "Operation was canceled",
+	LDAPResultNoSuchOperation:       "Server has no knowledge of the operation requested for cancellation",
+	LDAPResultTooLate:               "Too late to cancel the outstanding operation",
+	LDAPResultCannotCancel:          "The identified operation does not support cancellation or the cancel operation cannot be performed",
+	LDAPResultAssertionFailed:       "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed",
+	LDAPResultSyncRefreshRequired:   "Refresh Required",
+	LDAPResultInvalidResponse:       "Invalid Response",
+	LDAPResultAmbiguousResponse:     "Ambiguous Response",
+	LDAPResultTLSNotSupported:       "TLS Not Supported",
+	LDAPResultIntermediateResponse:  "Intermediate Response",
+	LDAPResultUnknownType:           "Unknown Type",
+	LDAPResultAuthorizationDenied:   "Authorization Denied",
+
+	ErrorNetwork:            "Network Error",
+	ErrorFilterCompile:      "Filter Compile Error",
+	ErrorFilterDecompile:    "Filter Decompile Error",
+	ErrorDebugging:          "Debugging Error",
+	ErrorUnexpectedMessage:  "Unexpected Message",
+	ErrorUnexpectedResponse: "Unexpected Response",
+	ErrorEmptyPassword:      "Empty password not allowed by the client",
+}
+
+// Error holds LDAP error information
+type Error struct {
+	// Err is the underlying error
+	Err error
+	// ResultCode is the LDAP error code
+	ResultCode uint16
+	// MatchedDN is the matchedDN returned if any
+	MatchedDN string
+	// Packet is the returned packet if any
+	Packet *ber.Packet
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
+}
+
+// GetLDAPError creates an Error out of a BER packet representing an LDAPResult.
+// The return value is an error that can be cast to an *Error to inspect the result code.
+// This function returns nil if resultCode in the LDAPResult sequence is success(0).
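+//
+// Callers typically test for specific result codes; a sketch with placeholder
+// bind values:
+//
+//	err := conn.Bind("uid=jdoe,ou=people,dc=example,dc=com", "wrong-password")
+//	if IsErrorWithCode(err, LDAPResultInvalidCredentials) {
+//		// handle invalid credentials
+//	}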
+func GetLDAPError(packet *ber.Packet) error { + if packet == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")} + } + + if len(packet.Children) >= 2 { + response := packet.Children[1] + if response == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet"), Packet: packet} + } + if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 { + resultCode := uint16(response.Children[0].Value.(int64)) + if resultCode == 0 { // No error + return nil + } + return &Error{ + ResultCode: resultCode, + MatchedDN: response.Children[1].Value.(string), + Err: fmt.Errorf("%s", response.Children[2].Value.(string)), + Packet: packet, + } + } + } + + return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format"), Packet: packet} +} + +// NewError creates an LDAP error with the given code and underlying error +func NewError(resultCode uint16, err error) error { + return &Error{ResultCode: resultCode, Err: err} +} + +// IsErrorAnyOf returns true if the given error is an LDAP error with any one of the given result codes +func IsErrorAnyOf(err error, codes ...uint16) bool { + if err == nil { + return false + } + + serverError, ok := err.(*Error) + if !ok { + return false + } + + for _, code := range codes { + if serverError.ResultCode == code { + return true + } + } + + return false +} + +// IsErrorWithCode returns true if the given error is an LDAP error with the given result code +func IsErrorWithCode(err error, desiredResultCode uint16) bool { + return IsErrorAnyOf(err, desiredResultCode) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/filter.go b/vendor/github.com/go-ldap/ldap/v3/filter.go new file mode 100644 index 0000000000000..db76210c10e5b --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/filter.go @@ -0,0 +1,486 @@ +package ldap + +import ( + "bytes" + hexpac "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "unicode" + "unicode/utf8" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// Filter choices +const ( + FilterAnd = 0 + FilterOr = 1 + FilterNot = 2 + FilterEqualityMatch = 3 + FilterSubstrings = 4 + FilterGreaterOrEqual = 5 + FilterLessOrEqual = 6 + FilterPresent = 7 + FilterApproxMatch = 8 + FilterExtensibleMatch = 9 +) + +// FilterMap contains human readable descriptions of Filter choices +var FilterMap = map[uint64]string{ + FilterAnd: "And", + FilterOr: "Or", + FilterNot: "Not", + FilterEqualityMatch: "Equality Match", + FilterSubstrings: "Substrings", + FilterGreaterOrEqual: "Greater Or Equal", + FilterLessOrEqual: "Less Or Equal", + FilterPresent: "Present", + FilterApproxMatch: "Approx Match", + FilterExtensibleMatch: "Extensible Match", +} + +// SubstringFilter options +const ( + FilterSubstringsInitial = 0 + FilterSubstringsAny = 1 + FilterSubstringsFinal = 2 +) + +// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices +var FilterSubstringsMap = map[uint64]string{ + FilterSubstringsInitial: "Substrings Initial", + FilterSubstringsAny: "Substrings Any", + FilterSubstringsFinal: "Substrings Final", +} + +// MatchingRuleAssertion choices +const ( + MatchingRuleAssertionMatchingRule = 1 + MatchingRuleAssertionType = 2 + MatchingRuleAssertionMatchValue = 3 + MatchingRuleAssertionDNAttributes = 4 +) + +// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices +var MatchingRuleAssertionMap = map[uint64]string{ + MatchingRuleAssertionMatchingRule: 
"Matching Rule Assertion Matching Rule", + MatchingRuleAssertionType: "Matching Rule Assertion Type", + MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value", + MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes", +} + +var _SymbolAny = []byte{'*'} + +// CompileFilter converts a string representation of a filter into a BER-encoded packet +func CompileFilter(filter string) (*ber.Packet, error) { + if len(filter) == 0 || filter[0] != '(' { + return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('")) + } + packet, pos, err := compileFilter(filter, 1) + if err != nil { + return nil, err + } + switch { + case pos > len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + case pos < len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:]))) + } + return packet, nil +} + +// DecompileFilter converts a packet representation of a filter into a string representation +func DecompileFilter(packet *ber.Packet) (_ string, err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter")) + } + }() + + buf := bytes.NewBuffer(nil) + buf.WriteByte('(') + childStr := "" + + switch packet.Tag { + case FilterAnd: + buf.WriteByte('&') + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + buf.WriteString(childStr) + } + case FilterOr: + buf.WriteByte('|') + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + buf.WriteString(childStr) + } + case FilterNot: + buf.WriteByte('!') + childStr, err = DecompileFilter(packet.Children[0]) + if err != nil { + return + } + buf.WriteString(childStr) + + case FilterSubstrings: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteByte('=') + for i, child := range packet.Children[1].Children { + if i == 0 && child.Tag != FilterSubstringsInitial { + buf.Write(_SymbolAny) + } + buf.WriteString(EscapeFilter(ber.DecodeString(child.Data.Bytes()))) + if child.Tag != FilterSubstringsFinal { + buf.Write(_SymbolAny) + } + } + case FilterEqualityMatch: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteByte('=') + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterGreaterOrEqual: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteString(">=") + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterLessOrEqual: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteString("<=") + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterPresent: + buf.WriteString(ber.DecodeString(packet.Data.Bytes())) + buf.WriteString("=*") + case FilterApproxMatch: + buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes())) + buf.WriteString("~=") + buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))) + case FilterExtensibleMatch: + attr := "" + dnAttributes := false + matchingRule := "" + value := "" + + for _, child := range packet.Children { + switch child.Tag { + case MatchingRuleAssertionMatchingRule: + matchingRule = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionType: + attr = 
ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionMatchValue: + value = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionDNAttributes: + dnAttributes = child.Value.(bool) + } + } + + if len(attr) > 0 { + buf.WriteString(attr) + } + if dnAttributes { + buf.WriteString(":dn") + } + if len(matchingRule) > 0 { + buf.WriteString(":") + buf.WriteString(matchingRule) + } + buf.WriteString(":=") + buf.WriteString(EscapeFilter(value)) + } + + buf.WriteByte(')') + + return buf.String(), nil +} + +func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) { + for pos < len(filter) && filter[pos] == '(' { + child, newPos, err := compileFilter(filter, pos+1) + if err != nil { + return pos, err + } + pos = newPos + parent.AppendChild(child) + } + if pos == len(filter) { + return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + } + + return pos + 1, nil +} + +func compileFilter(filter string, pos int) (*ber.Packet, int, error) { + var ( + packet *ber.Packet + err error + ) + + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter")) + } + }() + newPos := pos + + currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:]) + + switch currentRune { + case utf8.RuneError: + return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + case '(': + packet, newPos, err = compileFilter(filter, pos+currentWidth) + newPos++ + return packet, newPos, err + case '&': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '|': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '!': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot]) + var child *ber.Packet + child, newPos, err = compileFilter(filter, pos+currentWidth) + packet.AppendChild(child) + return packet, newPos, err + default: + const ( + stateReadingAttr = 0 + stateReadingExtensibleMatchingRule = 1 + stateReadingCondition = 2 + ) + + state := stateReadingAttr + attribute := bytes.NewBuffer(nil) + extensibleDNAttributes := false + extensibleMatchingRule := bytes.NewBuffer(nil) + condition := bytes.NewBuffer(nil) + + for newPos < len(filter) { + remainingFilter := filter[newPos:] + currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter) + if currentRune == ')' { + break + } + if currentRune == utf8.RuneError { + return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + } + + switch state { + case stateReadingAttr: + switch { + // Extensible rule, with only DN-matching + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingCondition + newPos += 5 + + // Extensible rule, with DN-matching and a matching OID + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = 
stateReadingExtensibleMatchingRule + newPos += 4 + + // Extensible rule, with attr only + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingCondition + newPos += 2 + + // Extensible rule, with no DN attribute matching + case currentRune == ':': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingExtensibleMatchingRule + newPos++ + + // Equality condition + case currentRune == '=': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch]) + state = stateReadingCondition + newPos++ + + // Greater-than or equal + case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Less-than or equal + case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Approx + case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch]) + state = stateReadingCondition + newPos += 2 + + // Still reading the attribute name + default: + attribute.WriteRune(currentRune) + newPos += currentWidth + } + + case stateReadingExtensibleMatchingRule: + switch { + + // Matching rule OID is done + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + state = stateReadingCondition + newPos += 2 + + // Still reading the matching rule oid + default: + extensibleMatchingRule.WriteRune(currentRune) + newPos += currentWidth + } + + case stateReadingCondition: + // append to the condition + condition.WriteRune(currentRune) + newPos += currentWidth + } + } + + if newPos == len(filter) { + err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + return packet, newPos, err + } + if packet == nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter")) + return packet, newPos, err + } + + switch { + case packet.Tag == FilterExtensibleMatch: + // MatchingRuleAssertion ::= SEQUENCE { + // matchingRule [1] MatchingRuleID OPTIONAL, + // type [2] AttributeDescription OPTIONAL, + // matchValue [3] AssertionValue, + // dnAttributes [4] BOOLEAN DEFAULT FALSE + // } + + // Include the matching rule oid, if specified + if extensibleMatchingRule.Len() > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule.String(), MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule])) + } + + // Include the attribute, if specified + if attribute.Len() > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute.String(), MatchingRuleAssertionMap[MatchingRuleAssertionType])) + } + + // Add the value (only required child) + encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes()) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, 
MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue])) + + // Defaults to false, so only include in the sequence if true + if extensibleDNAttributes { + packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes])) + } + + case packet.Tag == FilterEqualityMatch && bytes.Equal(condition.Bytes(), _SymbolAny): + packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute.String(), FilterMap[FilterPresent]) + case packet.Tag == FilterEqualityMatch && bytes.Contains(condition.Bytes(), _SymbolAny): + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute")) + packet.Tag = FilterSubstrings + packet.Description = FilterMap[uint64(packet.Tag)] + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings") + parts := bytes.Split(condition.Bytes(), _SymbolAny) + for i, part := range parts { + if len(part) == 0 { + continue + } + var tag ber.Tag + switch i { + case 0: + tag = FilterSubstringsInitial + case len(parts) - 1: + tag = FilterSubstringsFinal + default: + tag = FilterSubstringsAny + } + encodedString, encodeErr := decodeEscapedSymbols(part) + if encodeErr != nil { + return packet, newPos, encodeErr + } + seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)])) + } + packet.AppendChild(seq) + default: + encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes()) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute")) + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition")) + } + + newPos += currentWidth + return packet, newPos, err + } +} + +// Convert from "ABC\xx\xx\xx" form to literal bytes for transport +func decodeEscapedSymbols(src []byte) (string, error) { + var ( + buffer bytes.Buffer + offset int + reader = bytes.NewReader(src) + byteHex []byte + byteVal []byte + ) + + for { + runeVal, runeSize, err := reader.ReadRune() + if err == io.EOF { + return buffer.String(), nil + } else if err != nil { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: failed to read filter: %v", err)) + } else if runeVal == unicode.ReplacementChar { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", offset)) + } + + if runeVal == '\\' { + // http://tools.ietf.org/search/rfc4515 + // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not + // being a member of UTF1SUBSET. 
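+ // For example, per RFC 4515 the three characters `\2a` in a filter
+ // condition decode to the single literal byte 0x2A ('*').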
+ if byteHex == nil { + byteHex = make([]byte, 2) + byteVal = make([]byte, 1) + } + + if _, err := io.ReadFull(reader, byteHex); err != nil { + if err == io.ErrUnexpectedEOF { + return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter")) + } + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err)) + } + + if _, err := hexpac.Decode(byteVal, byteHex); err != nil { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err)) + } + + buffer.Write(byteVal) + } else { + buffer.WriteRune(runeVal) + } + + offset += runeSize + } +} diff --git a/vendor/github.com/go-ldap/ldap/v3/ldap.go b/vendor/github.com/go-ldap/ldap/v3/ldap.go new file mode 100644 index 0000000000000..fb75a89c43c7e --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/ldap.go @@ -0,0 +1,347 @@ +package ldap + +import ( + "fmt" + "io/ioutil" + "log" + "os" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// LDAP Application Codes +const ( + ApplicationBindRequest = 0 + ApplicationBindResponse = 1 + ApplicationUnbindRequest = 2 + ApplicationSearchRequest = 3 + ApplicationSearchResultEntry = 4 + ApplicationSearchResultDone = 5 + ApplicationModifyRequest = 6 + ApplicationModifyResponse = 7 + ApplicationAddRequest = 8 + ApplicationAddResponse = 9 + ApplicationDelRequest = 10 + ApplicationDelResponse = 11 + ApplicationModifyDNRequest = 12 + ApplicationModifyDNResponse = 13 + ApplicationCompareRequest = 14 + ApplicationCompareResponse = 15 + ApplicationAbandonRequest = 16 + ApplicationSearchResultReference = 19 + ApplicationExtendedRequest = 23 + ApplicationExtendedResponse = 24 +) + +// ApplicationMap contains human readable descriptions of LDAP Application Codes +var ApplicationMap = map[uint8]string{ + ApplicationBindRequest: "Bind Request", + ApplicationBindResponse: "Bind Response", + ApplicationUnbindRequest: "Unbind Request", + ApplicationSearchRequest: "Search Request", + ApplicationSearchResultEntry: "Search Result Entry", + ApplicationSearchResultDone: "Search Result Done", + ApplicationModifyRequest: "Modify Request", + ApplicationModifyResponse: "Modify Response", + ApplicationAddRequest: "Add Request", + ApplicationAddResponse: "Add Response", + ApplicationDelRequest: "Del Request", + ApplicationDelResponse: "Del Response", + ApplicationModifyDNRequest: "Modify DN Request", + ApplicationModifyDNResponse: "Modify DN Response", + ApplicationCompareRequest: "Compare Request", + ApplicationCompareResponse: "Compare Response", + ApplicationAbandonRequest: "Abandon Request", + ApplicationSearchResultReference: "Search Result Reference", + ApplicationExtendedRequest: "Extended Request", + ApplicationExtendedResponse: "Extended Response", +} + +// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10) +const ( + BeheraPasswordExpired = 0 + BeheraAccountLocked = 1 + BeheraChangeAfterReset = 2 + BeheraPasswordModNotAllowed = 3 + BeheraMustSupplyOldPassword = 4 + BeheraInsufficientPasswordQuality = 5 + BeheraPasswordTooShort = 6 + BeheraPasswordTooYoung = 7 + BeheraPasswordInHistory = 8 +) + +// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes +var BeheraPasswordPolicyErrorMap = map[int8]string{ + BeheraPasswordExpired: "Password expired", + BeheraAccountLocked: "Account locked", + BeheraChangeAfterReset: "Password must be changed", + BeheraPasswordModNotAllowed: "Policy prevents password 
modification", + BeheraMustSupplyOldPassword: "Policy requires old password in order to change password", + BeheraInsufficientPasswordQuality: "Password fails quality checks", + BeheraPasswordTooShort: "Password is too short for policy", + BeheraPasswordTooYoung: "Password has been changed too recently", + BeheraPasswordInHistory: "New password is in list of old passwords", +} + +var logger = log.New(os.Stderr, "", log.LstdFlags) + +// Logger allows clients to override the default logger +func Logger(l *log.Logger) { + logger = l +} + +// Adds descriptions to an LDAP Response packet for debugging +func addLDAPDescriptions(packet *ber.Packet) (err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r)) + } + }() + packet.Description = "LDAP Response" + packet.Children[0].Description = "Message ID" + + application := uint8(packet.Children[1].Tag) + packet.Children[1].Description = ApplicationMap[application] + + switch application { + case ApplicationBindRequest: + err = addRequestDescriptions(packet) + case ApplicationBindResponse: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationUnbindRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultEntry: + packet.Children[1].Children[0].Description = "Object Name" + packet.Children[1].Children[1].Description = "Attributes" + for _, child := range packet.Children[1].Children[1].Children { + child.Description = "Attribute" + child.Children[0].Description = "Attribute Name" + child.Children[1].Description = "Attribute Values" + for _, grandchild := range child.Children[1].Children { + grandchild.Description = "Attribute Value" + } + } + if len(packet.Children) == 3 { + err = addControlDescriptions(packet.Children[2]) + } + case ApplicationSearchResultDone: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationModifyRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyResponse: + case ApplicationAddRequest: + err = addRequestDescriptions(packet) + case ApplicationAddResponse: + case ApplicationDelRequest: + err = addRequestDescriptions(packet) + case ApplicationDelResponse: + case ApplicationModifyDNRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyDNResponse: + case ApplicationCompareRequest: + err = addRequestDescriptions(packet) + case ApplicationCompareResponse: + case ApplicationAbandonRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultReference: + case ApplicationExtendedRequest: + err = addRequestDescriptions(packet) + case ApplicationExtendedResponse: + } + + return err +} + +func addControlDescriptions(packet *ber.Packet) error { + packet.Description = "Controls" + for _, child := range packet.Children { + var value *ber.Packet + controlType := "" + child.Description = "Control" + switch len(child.Children) { + case 0: + // at least one child is required for control type + return fmt.Errorf("at least one child is required for control type") + + case 1: + // just type, no criticality or value + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + + case 2: + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + // Children[1] could be criticality or value (both are optional) + // 
duck-type on whether this is a boolean + if _, ok := child.Children[1].Value.(bool); ok { + child.Children[1].Description = "Criticality" + } else { + child.Children[1].Description = "Control Value" + value = child.Children[1] + } + + case 3: + // criticality and value present + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + child.Children[1].Description = "Criticality" + child.Children[2].Description = "Control Value" + value = child.Children[2] + + default: + // more than 3 children is invalid + return fmt.Errorf("more than 3 children for control packet found") + } + + if value == nil { + continue + } + switch controlType { + case ControlTypePaging: + value.Description += " (Paging)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes() + value.AppendChild(valueChildren) + } + value.Children[0].Description = "Real Search Control Value" + value.Children[0].Children[0].Description = "Paging Size" + value.Children[0].Children[1].Description = "Cookie" + + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera Draft)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + sequence := value.Children[0] + for _, child := range sequence.Children { + if child.Tag == 0 { + // Warning + warningPacket := child.Children[0] + val, err := ber.ParseInt64(warningPacket.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + if warningPacket.Tag == 0 { + // timeBeforeExpiration + value.Description += " (TimeBeforeExpiration)" + warningPacket.Value = val + } else if warningPacket.Tag == 1 { + // graceAuthNsRemaining + value.Description += " (GraceAuthNsRemaining)" + warningPacket.Value = val + } + } else if child.Tag == 1 { + // Error + bs := child.Data.Bytes() + if len(bs) != 1 || bs[0] > 8 { + return fmt.Errorf("failed to decode data bytes: %s", "invalid PasswordPolicyResponse enum value") + } + val := int8(bs[0]) + child.Description = "Error" + child.Value = val + } + } + } + } + return nil +} + +func addRequestDescriptions(packet *ber.Packet) error { + packet.Description = "LDAP Request" + packet.Children[0].Description = "Message ID" + packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)] + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error { + resultCode := uint16(LDAPResultSuccess) + matchedDN := "" + description := "Success" + if err := GetLDAPError(packet); err != nil { + resultCode = err.(*Error).ResultCode + matchedDN = err.(*Error).MatchedDN + description = "Error Message" + } + + packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")" + packet.Children[1].Children[1].Description = "Matched DN (" + matchedDN + ")" + packet.Children[1].Children[2].Description = description + if len(packet.Children[1].Children) > 3 { + packet.Children[1].Children[3].Description = "Referral" + } + if len(packet.Children) == 3 { + 
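+ // The optional third child of the LDAP envelope carries response controls.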
return addControlDescriptions(packet.Children[2]) + } + return nil +} + +// DebugBinaryFile reads and prints packets from the given filename +func DebugBinaryFile(fileName string) error { + file, err := ioutil.ReadFile(fileName) + if err != nil { + return NewError(ErrorDebugging, err) + } + ber.PrintBytes(os.Stdout, file, "") + packet, err := ber.DecodePacketErr(file) + if err != nil { + return fmt.Errorf("failed to decode packet: %s", err) + } + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + + return nil +} + +var hex = "0123456789abcdef" + +func mustEscape(c byte) bool { + return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0 +} + +// EscapeFilter escapes from the provided LDAP filter string the special +// characters in the set `()*\` and those out of the range 0 < c < 0x80, +// as defined in RFC4515. +func EscapeFilter(filter string) string { + escape := 0 + for i := 0; i < len(filter); i++ { + if mustEscape(filter[i]) { + escape++ + } + } + if escape == 0 { + return filter + } + buf := make([]byte, len(filter)+escape*2) + for i, j := 0, 0; i < len(filter); i++ { + c := filter[i] + if mustEscape(c) { + buf[j+0] = '\\' + buf[j+1] = hex[c>>4] + buf[j+2] = hex[c&0xf] + j += 3 + } else { + buf[j] = c + j++ + } + } + return string(buf) +} diff --git a/vendor/github.com/go-ldap/ldap/v3/moddn.go b/vendor/github.com/go-ldap/ldap/v3/moddn.go new file mode 100644 index 0000000000000..ec246d1fa4c47 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/moddn.go @@ -0,0 +1,98 @@ +package ldap + +import ( + ber "github.com/go-asn1-ber/asn1-ber" +) + +// ModifyDNRequest holds the request to modify a DN +type ModifyDNRequest struct { + DN string + NewRDN string + DeleteOldRDN bool + NewSuperior string + // Controls hold optional controls to send with the request + Controls []Control +} + +// NewModifyDNRequest creates a new request which can be passed to ModifyDN(). +// +// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an +// empty string for just changing the object's RDN. +// +// For moving the object without renaming, the "rdn" must be the first +// RDN of the given DN. +// +// A call like +// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "") +// will setup the request to just rename uid=someone,dc=example,dc=org to +// uid=newname,dc=example,dc=org. +func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest { + return &ModifyDNRequest{ + DN: dn, + NewRDN: rdn, + DeleteOldRDN: delOld, + NewSuperior: newSup, + } +} + +// NewModifyDNWithControlsRequest creates a new request which can be passed to ModifyDN() +// and also allows setting LDAP request controls. 
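+//
+// A hypothetical sketch, reusing the rename example above but with explicit
+// controls (here controls is a previously built []Control):
+//	mdnReq := NewModifyDNWithControlsRequest("uid=someone,dc=example,dc=org",
+//		"uid=newname", true, "", controls)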
+// +// Refer NewModifyDNRequest for other parameters +func NewModifyDNWithControlsRequest(dn string, rdn string, delOld bool, + newSup string, controls []Control) *ModifyDNRequest { + return &ModifyDNRequest{ + DN: dn, + NewRDN: rdn, + DeleteOldRDN: delOld, + NewSuperior: newSup, + Controls: controls, + } +} + +func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN")) + if req.DeleteOldRDN { + buf := []byte{0xff} + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, string(buf), "Delete old RDN")) + } else { + pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN")) + } + if req.NewSuperior != "" { + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior")) + } + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument +// to NewModifyDNRequest() is not ""). +func (l *Conn) ModifyDN(m *ModifyDNRequest) error { + msgCtx, err := l.doRequest(m) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationModifyDNResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + logger.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/modify.go b/vendor/github.com/go-ldap/ldap/v3/modify.go new file mode 100644 index 0000000000000..f6fe38593711a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/modify.go @@ -0,0 +1,176 @@ +package ldap + +import ( + "errors" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// Change operation choices +const ( + AddAttribute = 0 + DeleteAttribute = 1 + ReplaceAttribute = 2 + IncrementAttribute = 3 // (https://tools.ietf.org/html/rfc4525) +) + +// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type PartialAttribute struct { + // Type is the type of the partial attribute + Type string + // Vals are the values of the partial attribute + Vals []string +} + +func (p *PartialAttribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range p.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type Change struct { + // Operation is the type of change to be made + Operation uint + // Modification is the attribute to be modified + Modification PartialAttribute +} + +func (c *Change) encode() *ber.Packet { + change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, 
ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation")) + change.AppendChild(c.Modification.encode()) + return change +} + +// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type ModifyRequest struct { + // DN is the distinguishedName of the directory entry to modify + DN string + // Changes contain the attributes to modify + Changes []Change + // Controls hold optional controls to send with the request + Controls []Control +} + +// Add appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Add(attrType string, attrVals []string) { + req.appendChange(AddAttribute, attrType, attrVals) +} + +// Delete appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Delete(attrType string, attrVals []string) { + req.appendChange(DeleteAttribute, attrType, attrVals) +} + +// Replace appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Replace(attrType string, attrVals []string) { + req.appendChange(ReplaceAttribute, attrType, attrVals) +} + +// Increment appends the given attribute to the list of changes to be made +func (req *ModifyRequest) Increment(attrType string, attrVal string) { + req.appendChange(IncrementAttribute, attrType, []string{attrVal}) +} + +func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) { + req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}}) +} + +func (req *ModifyRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes") + for _, change := range req.Changes { + changes.AppendChild(change.encode()) + } + pkt.AppendChild(changes) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// NewModifyRequest creates a modify request for the given DN +func NewModifyRequest(dn string, controls []Control) *ModifyRequest { + return &ModifyRequest{ + DN: dn, + Controls: controls, + } +} + +// Modify performs the ModifyRequest +func (l *Conn) Modify(modifyRequest *ModifyRequest) error { + msgCtx, err := l.doRequest(modifyRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + if packet.Children[1].Tag == ApplicationModifyResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + logger.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + return nil +} + +// ModifyResult holds the server's response to a modify request +type ModifyResult struct { + // Controls are the returned controls + Controls []Control +} + +// ModifyWithResult performs the ModifyRequest and returns the result +func (l *Conn) ModifyWithResult(modifyRequest *ModifyRequest) (*ModifyResult, error) { + msgCtx, err := l.doRequest(modifyRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &ModifyResult{ + Controls: make([]Control, 0), + } + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packet, err := l.readPacket(msgCtx) + if err != nil { + return 
nil, err + } + + switch packet.Children[1].Tag { + case ApplicationModifyResponse: + err := GetLDAPError(packet) + if err != nil { + return nil, err + } + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, err := DecodeControl(child) + if err != nil { + return nil, errors.New("failed to decode child control: " + err.Error()) + } + result.Controls = append(result.Controls, decodedChild) + } + } + } + l.Debug.Printf("%d: returning", msgCtx.id) + return result, nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go b/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go new file mode 100644 index 0000000000000..62a110843d81b --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go @@ -0,0 +1,126 @@ +package ldap + +import ( + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +const ( + passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1" +) + +// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt +type PasswordModifyRequest struct { + // UserIdentity is an optional string representation of the user associated with the request. + // This string may or may not be an LDAPDN [RFC2253]. + // If no UserIdentity field is present, the request acts up upon the password of the user currently associated with the LDAP session + UserIdentity string + // OldPassword, if present, contains the user's current password + OldPassword string + // NewPassword, if present, contains the desired password for this user + NewPassword string +} + +// PasswordModifyResult holds the server response to a PasswordModifyRequest +type PasswordModifyResult struct { + // GeneratedPassword holds a password generated by the server, if present + GeneratedPassword string + // Referral are the returned referral + Referral string +} + +func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation") + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID")) + + extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request") + passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request") + if req.UserIdentity != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity")) + } + if req.OldPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password")) + } + if req.NewPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password")) + } + extendedRequestValue.AppendChild(passwordModifyRequestValue) + + pkt.AppendChild(extendedRequestValue) + + envelope.AppendChild(pkt) + + return nil +} + +// NewPasswordModifyRequest creates a new PasswordModifyRequest +// +// According to the RFC 3602 (https://tools.ietf.org/html/rfc3062): +// userIdentity is a string representing the user associated with the request. +// This string may or may not be an LDAPDN (RFC 2253). +// If userIdentity is empty then the operation will act on the user associated +// with the session. 
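+//
+// For example (a hypothetical self-service change for the bound user, assuming
+// an established *Conn named l; the empty userIdentity selects the session's
+// own entry):
+//	req := NewPasswordModifyRequest("", "OldSecret1", "NewSecret2")
+//	_, err := l.PasswordModify(req)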
+// +// oldPassword is the current user's password, it can be empty or it can be +// needed depending on the session user access rights (usually an administrator +// can change a user's password without knowing the current one) and the +// password policy (see pwdSafeModify password policy's attribute) +// +// newPassword is the desired user's password. If empty the server can return +// an error or generate a new password that will be available in the +// PasswordModifyResult.GeneratedPassword +// +func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest { + return &PasswordModifyRequest{ + UserIdentity: userIdentity, + OldPassword: oldPassword, + NewPassword: newPassword, + } +} + +// PasswordModify performs the modification request +func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) { + msgCtx, err := l.doRequest(passwordModifyRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return nil, err + } + + result := &PasswordModifyResult{} + + if packet.Children[1].Tag == ApplicationExtendedResponse { + err := GetLDAPError(packet) + if err != nil { + if IsErrorWithCode(err, LDAPResultReferral) { + for _, child := range packet.Children[1].Children { + if child.Tag == 3 { + result.Referral = child.Children[0].Value.(string) + } + } + } + return result, err + } + } else { + return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)) + } + + extendedResponse := packet.Children[1] + for _, child := range extendedResponse.Children { + if child.Tag == 11 { + passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes()) + if len(passwordModifyResponseValue.Children) == 1 { + if passwordModifyResponseValue.Children[0].Tag == 0 { + result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes()) + } + } + } + } + + return result, nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/request.go b/vendor/github.com/go-ldap/ldap/v3/request.go new file mode 100644 index 0000000000000..4ea31e90404c6 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/request.go @@ -0,0 +1,71 @@ +package ldap + +import ( + "errors" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +var ( + errRespChanClosed = errors.New("ldap: response channel closed") + errCouldNotRetMsg = errors.New("ldap: could not retrieve message") + ErrNilConnection = errors.New("ldap: conn is nil, expected net.Conn") +) + +type request interface { + appendTo(*ber.Packet) error +} + +type requestFunc func(*ber.Packet) error + +func (f requestFunc) appendTo(p *ber.Packet) error { + return f(p) +} + +func (l *Conn) doRequest(req request) (*messageContext, error) { + if l == nil || l.conn == nil { + return nil, ErrNilConnection + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + if err := req.appendTo(packet); err != nil { + return nil, err + } + + if l.Debug { + l.Debug.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + l.Debug.Printf("%d: returning", msgCtx.id) + return msgCtx, nil +} + +func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) { + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok 
:= <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errRespChanClosed) + } + packet, err := packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errCouldNotRetMsg) + } + + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + l.Debug.PrintPacket(packet) + } + return packet, nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/search.go b/vendor/github.com/go-ldap/ldap/v3/search.go new file mode 100644 index 0000000000000..35fc2497a98b9 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/search.go @@ -0,0 +1,424 @@ +package ldap + +import ( + "errors" + "fmt" + "sort" + "strings" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +// scope choices +const ( + ScopeBaseObject = 0 + ScopeSingleLevel = 1 + ScopeWholeSubtree = 2 +) + +// ScopeMap contains human readable descriptions of scope choices +var ScopeMap = map[int]string{ + ScopeBaseObject: "Base Object", + ScopeSingleLevel: "Single Level", + ScopeWholeSubtree: "Whole Subtree", +} + +// derefAliases +const ( + NeverDerefAliases = 0 + DerefInSearching = 1 + DerefFindingBaseObj = 2 + DerefAlways = 3 +) + +// DerefMap contains human readable descriptions of derefAliases choices +var DerefMap = map[int]string{ + NeverDerefAliases: "NeverDerefAliases", + DerefInSearching: "DerefInSearching", + DerefFindingBaseObj: "DerefFindingBaseObj", + DerefAlways: "DerefAlways", +} + +// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs. +// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the +// same input map of attributes, the output entry will contain the same order of attributes +func NewEntry(dn string, attributes map[string][]string) *Entry { + var attributeNames []string + for attributeName := range attributes { + attributeNames = append(attributeNames, attributeName) + } + sort.Strings(attributeNames) + + var encodedAttributes []*EntryAttribute + for _, attributeName := range attributeNames { + encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName])) + } + return &Entry{ + DN: dn, + Attributes: encodedAttributes, + } +} + +// Entry represents a single search result entry +type Entry struct { + // DN is the distinguished name of the entry + DN string + // Attributes are the returned attributes for the entry + Attributes []*EntryAttribute +} + +// GetAttributeValues returns the values for the named attribute, or an empty list +func (e *Entry) GetAttributeValues(attribute string) []string { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.Values + } + } + return []string{} +} + +// GetEqualFoldAttributeValues returns the values for the named attribute, or an +// empty list. Attribute matching is done with strings.EqualFold. 
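+// For example, a lookup for "memberOf" also returns values stored under the
+// attribute name "memberof".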
+func (e *Entry) GetEqualFoldAttributeValues(attribute string) []string { + for _, attr := range e.Attributes { + if strings.EqualFold(attribute, attr.Name) { + return attr.Values + } + } + return []string{} +} + +// GetRawAttributeValues returns the byte values for the named attribute, or an empty list +func (e *Entry) GetRawAttributeValues(attribute string) [][]byte { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.ByteValues + } + } + return [][]byte{} +} + +// GetEqualFoldRawAttributeValues returns the byte values for the named attribute, or an empty list +func (e *Entry) GetEqualFoldRawAttributeValues(attribute string) [][]byte { + for _, attr := range e.Attributes { + if strings.EqualFold(attr.Name, attribute) { + return attr.ByteValues + } + } + return [][]byte{} +} + +// GetAttributeValue returns the first value for the named attribute, or "" +func (e *Entry) GetAttributeValue(attribute string) string { + values := e.GetAttributeValues(attribute) + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetEqualFoldAttributeValue returns the first value for the named attribute, or "". +// Attribute comparison is done with strings.EqualFold. +func (e *Entry) GetEqualFoldAttributeValue(attribute string) string { + values := e.GetEqualFoldAttributeValues(attribute) + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetRawAttributeValue returns the first value for the named attribute, or an empty slice +func (e *Entry) GetRawAttributeValue(attribute string) []byte { + values := e.GetRawAttributeValues(attribute) + if len(values) == 0 { + return []byte{} + } + return values[0] +} + +// GetEqualFoldRawAttributeValue returns the first value for the named attribute, or an empty slice +func (e *Entry) GetEqualFoldRawAttributeValue(attribute string) []byte { + values := e.GetEqualFoldRawAttributeValues(attribute) + if len(values) == 0 { + return []byte{} + } + return values[0] +} + +// Print outputs a human-readable description +func (e *Entry) Print() { + fmt.Printf("DN: %s\n", e.DN) + for _, attr := range e.Attributes { + attr.Print() + } +} + +// PrettyPrint outputs a human-readable description indenting +func (e *Entry) PrettyPrint(indent int) { + fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN) + for _, attr := range e.Attributes { + attr.PrettyPrint(indent + 2) + } +} + +// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair +func NewEntryAttribute(name string, values []string) *EntryAttribute { + var bytes [][]byte + for _, value := range values { + bytes = append(bytes, []byte(value)) + } + return &EntryAttribute{ + Name: name, + Values: values, + ByteValues: bytes, + } +} + +// EntryAttribute holds a single attribute +type EntryAttribute struct { + // Name is the name of the attribute + Name string + // Values contain the string values of the attribute + Values []string + // ByteValues contain the raw values of the attribute + ByteValues [][]byte +} + +// Print outputs a human-readable description +func (e *EntryAttribute) Print() { + fmt.Printf("%s: %s\n", e.Name, e.Values) +} + +// PrettyPrint outputs a human-readable description with indenting +func (e *EntryAttribute) PrettyPrint(indent int) { + fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values) +} + +// SearchResult holds the server's response to a search request +type SearchResult struct { + // Entries are the returned entries + Entries []*Entry + // Referrals are the returned referrals + Referrals []string + 
// Controls are the returned controls + Controls []Control +} + +// Print outputs a human-readable description +func (s *SearchResult) Print() { + for _, entry := range s.Entries { + entry.Print() + } +} + +// PrettyPrint outputs a human-readable description with indenting +func (s *SearchResult) PrettyPrint(indent int) { + for _, entry := range s.Entries { + entry.PrettyPrint(indent) + } +} + +// SearchRequest represents a search request to send to the server +type SearchRequest struct { + BaseDN string + Scope int + DerefAliases int + SizeLimit int + TimeLimit int + TypesOnly bool + Filter string + Attributes []string + Controls []Control +} + +func (req *SearchRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit")) + pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only")) + // compile and encode filter + filterPacket, err := CompileFilter(req.Filter) + if err != nil { + return err + } + pkt.AppendChild(filterPacket) + // encode attributes + attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range req.Attributes { + attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + } + pkt.AppendChild(attributesPacket) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil +} + +// NewSearchRequest creates a new search request +func NewSearchRequest( + BaseDN string, + Scope, DerefAliases, SizeLimit, TimeLimit int, + TypesOnly bool, + Filter string, + Attributes []string, + Controls []Control, +) *SearchRequest { + return &SearchRequest{ + BaseDN: BaseDN, + Scope: Scope, + DerefAliases: DerefAliases, + SizeLimit: SizeLimit, + TimeLimit: TimeLimit, + TypesOnly: TypesOnly, + Filter: Filter, + Attributes: Attributes, + Controls: Controls, + } +} + +// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the +// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically. 
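+//
+// A minimal usage sketch (assuming an established *Conn named l):
+//	sr, err := l.SearchWithPaging(NewSearchRequest(
+//		"dc=example,dc=org", ScopeWholeSubtree, NeverDerefAliases, 0, 0, false,
+//		"(objectClass=person)", []string{"cn"}, nil,
+//	), 100)
+//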
+// The following four cases are possible given the arguments: +// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size +// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries +// A requested pagingSize of 0 is interpreted as no limit by LDAP servers. +func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) { + var pagingControl *ControlPaging + + control := FindControl(searchRequest.Controls, ControlTypePaging) + if control == nil { + pagingControl = NewControlPaging(pagingSize) + searchRequest.Controls = append(searchRequest.Controls, pagingControl) + } else { + castControl, ok := control.(*ControlPaging) + if !ok { + return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control) + } + if castControl.PagingSize != pagingSize { + return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize) + } + pagingControl = castControl + } + + searchResult := new(SearchResult) + for { + result, err := l.Search(searchRequest) + l.Debug.Printf("Looking for Paging Control...") + if err != nil { + return searchResult, err + } + if result == nil { + return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received")) + } + + searchResult.Entries = append(searchResult.Entries, result.Entries...) + searchResult.Referrals = append(searchResult.Referrals, result.Referrals...) + searchResult.Controls = append(searchResult.Controls, result.Controls...) + + l.Debug.Printf("Looking for Paging Control...") + pagingResult := FindControl(result.Controls, ControlTypePaging) + if pagingResult == nil { + pagingControl = nil + l.Debug.Printf("Could not find paging control. Breaking...") + break + } + + cookie := pagingResult.(*ControlPaging).Cookie + if len(cookie) == 0 { + pagingControl = nil + l.Debug.Printf("Could not find cookie. 
Breaking...") + break + } + pagingControl.SetCookie(cookie) + } + + if pagingControl != nil { + l.Debug.Printf("Abandoning Paging...") + pagingControl.PagingSize = 0 + if _, err := l.Search(searchRequest); err != nil { + return searchResult, err + } + } + + return searchResult, nil +} + +// Search performs the given search request +func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { + msgCtx, err := l.doRequest(searchRequest) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &SearchResult{ + Entries: make([]*Entry, 0), + Referrals: make([]string, 0), + Controls: make([]Control, 0), + } + + for { + packet, err := l.readPacket(msgCtx) + if err != nil { + return result, err + } + + switch packet.Children[1].Tag { + case 4: + entry := &Entry{ + DN: packet.Children[1].Children[0].Value.(string), + Attributes: unpackAttributes(packet.Children[1].Children[1].Children), + } + result.Entries = append(result.Entries, entry) + case 5: + err := GetLDAPError(packet) + if err != nil { + return result, err + } + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, err := DecodeControl(child) + if err != nil { + return result, fmt.Errorf("failed to decode child control: %s", err) + } + result.Controls = append(result.Controls, decodedChild) + } + } + return result, nil + case 19: + result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string)) + } + } +} + +// unpackAttributes will extract all given LDAP attributes and it's values +// from the ber.Packet +func unpackAttributes(children []*ber.Packet) []*EntryAttribute { + entries := make([]*EntryAttribute, len(children)) + for i, child := range children { + length := len(child.Children[1].Children) + entry := &EntryAttribute{ + Name: child.Children[0].Value.(string), + // pre-allocate the slice since we can determine + // the number of attributes at this point + Values: make([]string, length), + ByteValues: make([][]byte, length), + } + + for i, value := range child.Children[1].Children { + entry.ByteValues[i] = value.ByteValue + entry.Values[i] = value.Value.(string) + } + entries[i] = entry + } + + return entries +} diff --git a/vendor/github.com/go-ldap/ldap/v3/unbind.go b/vendor/github.com/go-ldap/ldap/v3/unbind.go new file mode 100644 index 0000000000000..6c411cd1d393a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/unbind.go @@ -0,0 +1,37 @@ +package ldap + +import ( + "errors" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +var ErrConnUnbound = NewError(ErrorNetwork, errors.New("ldap: connection is closed")) + +type unbindRequest struct{} + +func (unbindRequest) appendTo(envelope *ber.Packet) error { + envelope.AppendChild(ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationUnbindRequest, nil, ApplicationMap[ApplicationUnbindRequest])) + return nil +} + +// Unbind will perform an unbind request. The Unbind operation +// should be thought of as the "quit" operation. +// See https://datatracker.ietf.org/doc/html/rfc4511#section-4.3 +func (l *Conn) Unbind() error { + if l.IsClosing() { + return ErrConnUnbound + } + + _, err := l.doRequest(unbindRequest{}) + if err != nil { + return err + } + + // Sending an unbindRequest will make the connection unusable. 
+ // Pending requests will fail with: + // LDAP Result Code 200 "Network Error": ldap: response channel closed + l.Close() + + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/v3/whoami.go b/vendor/github.com/go-ldap/ldap/v3/whoami.go new file mode 100644 index 0000000000000..10c523d082194 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/v3/whoami.go @@ -0,0 +1,91 @@ +package ldap + +// This file contains the "Who Am I?" extended operation as specified in rfc 4532 +// +// https://tools.ietf.org/html/rfc4532 + +import ( + "errors" + "fmt" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +type whoAmIRequest bool + +// WhoAmIResult is returned by the WhoAmI() call +type WhoAmIResult struct { + AuthzID string +} + +func (r whoAmIRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Who Am I? Extended Operation") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, ControlTypeWhoAmI, "Extended Request Name: Who Am I? OID")) + return request, nil +} + +// WhoAmI returns the authzId the server thinks we are, you may pass controls +// like a Proxied Authorization control +func (l *Conn) WhoAmI(controls []Control) (*WhoAmIResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + req := whoAmIRequest(true) + encodedWhoAmIRequest, err := req.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedWhoAmIRequest) + + if len(controls) != 0 { + packet.AppendChild(encodeControls(controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &WhoAmIResult{} + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message")) + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationExtendedResponse { + if err := GetLDAPError(packet); err != nil { + return nil, err + } + } else { + return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag)) + } + + extendedResponse := packet.Children[1] + for _, child := range extendedResponse.Children { + if child.Tag == 11 { + result.AuthzID = ber.DecodeString(child.Data.Bytes()) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/OWNERS b/vendor/github.com/onsi/ginkgo/v2/OWNERS new file mode 100644 index 0000000000000..2d1f6c71efbf5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/OWNERS @@ -0,0 +1,4 @@ +reviewers: +approvers: + - bertinatto + - stbenjam diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go new file mode 100644 index 0000000000000..bf60ceb522dd2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go @@ -0,0 +1,33 @@ +package ginkgo + +import ( + "io" + + 
"github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/types" +) + +func AppendSpecText(test *internal.Spec, text string) { + test.AppendText(text) +} + +func GetSuite() *internal.Suite { + return global.Suite +} + +func GetFailer() *internal.Failer { + return global.Failer +} + +func NewWriter(w io.Writer) *internal.Writer { + return internal.NewWriter(w) +} + +func GetWriter() *internal.Writer { + return GinkgoWriter.(*internal.Writer) +} + +func SetReporterConfig(r types.ReporterConfig) { + reporterConfig = r +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go new file mode 100644 index 0000000000000..2d0bcc914dfff --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go @@ -0,0 +1,22 @@ +package internal + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +func (s Spec) CodeLocations() []types.CodeLocation { + return s.Nodes.CodeLocations() +} + +func (s Spec) AppendText(text string) { + s.Nodes[len(s.Nodes)-1].Text += text +} + +func (s Spec) Labels() []string { + var labels []string + for _, n := range s.Nodes { + labels = append(labels, n.Labels...) + } + + return labels +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a3c9e6bf18f06..12e50b8a95bf6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -65,6 +65,8 @@ type Suite struct { selectiveLock *sync.Mutex client parallel_support.Client + + annotateFn AnnotateFunc } func NewSuite() *Suite { @@ -110,6 +112,11 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) + if suite.annotateFn != nil { + for _, spec := range specs { + suite.annotateFn(spec.Text(), spec) + } + } specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) suite.phase = PhaseRun diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go new file mode 100644 index 0000000000000..29eae02832497 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go @@ -0,0 +1,71 @@ +package internal + +import ( + "time" + + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type AnnotateFunc func(testName string, test types.TestSpec) + +func (suite *Suite) SetAnnotateFn(fn AnnotateFunc) { + suite.annotateFn = fn +} + +func (suite *Suite) GetReport() types.Report { + return suite.report +} + +func (suite *Suite) WalkTests(fn AnnotateFunc) { + if suite.phase != PhaseBuildTree { + panic("cannot run before building the tree = call suite.BuildTree() first") + } + ApplyNestedFocusPolicyToTree(suite.tree) + specs := GenerateSpecsFromTreeRoot(suite.tree) + for _, spec := range specs { + fn(spec.Text(), spec) + } +} + +func (suite *Suite) InPhaseBuildTree() bool { + return suite.phase == PhaseBuildTree +} + +func (suite *Suite) ClearBeforeAndAfterSuiteNodes() { + // Don't build the tree multiple times, it results in multiple initing of tests + if !suite.InPhaseBuildTree() { + suite.BuildTree() + } + newNodes := Nodes{} + for _, node := range suite.suiteNodes { + if node.NodeType == types.NodeTypeBeforeSuite || node.NodeType == types.NodeTypeAfterSuite || 
node.NodeType == types.NodeTypeSynchronizedBeforeSuite || node.NodeType == types.NodeTypeSynchronizedAfterSuite {
+			continue
+		}
+		newNodes = append(newNodes, node)
+	}
+	suite.suiteNodes = newNodes
+}
+
+func (suite *Suite) RunSpec(spec types.TestSpec, suiteLabels Labels, suiteDescription, suitePath string, failer *Failer, writer WriterInterface, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig) (bool, bool) {
+	if suite.phase != PhaseBuildTree {
+		panic("cannot run before building the tree; call suite.BuildTree() first")
+	}
+
+	suite.phase = PhaseRun
+	suite.client = nil
+	suite.failer = failer
+	suite.reporter = reporters.NewDefaultReporter(reporterConfig, writer)
+	suite.writer = writer
+	suite.outputInterceptor = NoopOutputInterceptor{}
+	if suiteConfig.Timeout > 0 {
+		suite.deadline = time.Now().Add(suiteConfig.Timeout)
+	}
+	suite.interruptHandler = interrupt_handler.NewInterruptHandler(nil)
+	suite.config = suiteConfig
+
+	success := suite.runSpecs(suiteDescription, suiteLabels, suitePath, false, []Spec{spec.(Spec)})
+
+	return success, false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go b/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go
new file mode 100644
index 0000000000000..02d319bba041f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go
@@ -0,0 +1,8 @@
+package types
+
+type TestSpec interface {
+	CodeLocations() []CodeLocation
+	Text() string
+	AppendText(text string)
+	Labels() []string
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE b/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE
new file mode 100644
index 0000000000000..261eeb9e9f8b2
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmd.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmd.go new file mode 100644 index 0000000000000..5db1741be74ba --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmd.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo" + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist" + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun" + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" +) + +func DefaultExtensionCommands(registry *extension.Registry) []*cobra.Command { + return []*cobra.Command{ + cmdrun.NewRunSuiteCommand(registry), + cmdrun.NewRunTestCommand(registry), + cmdlist.NewListCommand(registry), + cmdinfo.NewInfoCommand(registry), + cmdupdate.NewUpdateCommand(registry), + } +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go new file mode 100644 index 0000000000000..1d4237876d539 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go @@ -0,0 +1,38 @@ +package cmdinfo + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewInfoCommand(registry *extension.Registry) *cobra.Command { + componentFlags := flags.NewComponentFlags() + + cmd := &cobra.Command{ + Use: "info", + Short: "Display extension metadata", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + extension := registry.Get(componentFlags.Component) + if extension == nil { + return fmt.Errorf("couldn't find the component %q", componentFlags.Component) + } + + info, err := json.MarshalIndent(extension, "", " ") + if err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "%s\n", string(info)) + return nil + }, + } + componentFlags.BindFlags(cmd.Flags()) + return cmd +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go new file mode 100644 index 0000000000000..69f02660a0588 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go @@ -0,0 +1,124 @@ +package cmdlist + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewListCommand(registry *extension.Registry) *cobra.Command { + opts := struct { + componentFlags *flags.ComponentFlags + suiteFlags *flags.SuiteFlags + outputFlags *flags.OutputFlags + }{ + suiteFlags: flags.NewSuiteFlags(), + componentFlags: flags.NewComponentFlags(), + outputFlags: flags.NewOutputFlags(), + } + + // Tests + listTestsCmd := &cobra.Command{ + Use: "tests", + Short: "List available tests", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + ext := registry.Get(opts.componentFlags.Component) + if ext == nil { + return fmt.Errorf("component not found: %s", opts.componentFlags.Component) + } + + // Find suite, if specified + var foundSuite 
*extension.Suite + var err error + if opts.suiteFlags.Suite != "" { + foundSuite, err = ext.GetSuite(opts.suiteFlags.Suite) + if err != nil { + return err + } + } + + // Filter for suite + specs := ext.GetSpecs() + if foundSuite != nil { + specs, err = specs.Filter(foundSuite.Qualifiers) + if err != nil { + return err + } + } + + data, err := opts.outputFlags.Marshal(specs) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(data)) + return nil + }, + } + opts.suiteFlags.BindFlags(listTestsCmd.Flags()) + opts.componentFlags.BindFlags(listTestsCmd.Flags()) + opts.outputFlags.BindFlags(listTestsCmd.Flags()) + + // Suites + listSuitesCommand := &cobra.Command{ + Use: "suites", + Short: "List available suites", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + ext := registry.Get(opts.componentFlags.Component) + if ext == nil { + return fmt.Errorf("component not found: %s", opts.componentFlags.Component) + } + + suites := ext.Suites + + data, err := opts.outputFlags.Marshal(suites) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(data)) + return nil + }, + } + opts.componentFlags.BindFlags(listSuitesCommand.Flags()) + opts.outputFlags.BindFlags(listSuitesCommand.Flags()) + + // Components + listComponentsCmd := &cobra.Command{ + Use: "components", + Short: "List available components", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + var components []*extension.Component + registry.Walk(func(e *extension.Extension) { + components = append(components, &e.Component) + }) + + data, err := opts.outputFlags.Marshal(components) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(data)) + return nil + }, + } + opts.outputFlags.BindFlags(listComponentsCmd.Flags()) + + var listCmd = &cobra.Command{ + Use: "list [subcommand]", + Short: "List items", + RunE: func(cmd *cobra.Command, args []string) error { + return listTestsCmd.RunE(cmd, args) + }, + } + opts.suiteFlags.BindFlags(listCmd.Flags()) + opts.componentFlags.BindFlags(listCmd.Flags()) + opts.outputFlags.BindFlags(listCmd.Flags()) + listCmd.AddCommand(listTestsCmd, listComponentsCmd, listSuitesCommand) + + return listCmd +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go new file mode 100644 index 0000000000000..c79b3ff786b40 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go @@ -0,0 +1,63 @@ +package cmdrun + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewRunSuiteCommand(registry *extension.Registry) *cobra.Command { + opts := struct { + componentFlags *flags.ComponentFlags + outputFlags *flags.OutputFlags + concurrencyFlags *flags.ConcurrencyFlags + }{ + componentFlags: flags.NewComponentFlags(), + outputFlags: flags.NewOutputFlags(), + concurrencyFlags: flags.NewConcurrencyFlags(), + } + + cmd := &cobra.Command{ + Use: "run-suite NAME", + Short: "Run a group of tests by suite", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + ext := registry.Get(opts.componentFlags.Component) + if ext == nil { + return fmt.Errorf("component not found: 
%s", opts.componentFlags.Component) + } + if len(args) != 1 { + return fmt.Errorf("must specify one suite name") + } + + w, err := extensiontests.NewResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output)) + if err != nil { + return err + } + defer w.Flush() + + suite, err := ext.GetSuite(args[0]) + if err != nil { + return errors.Wrapf(err, "couldn't find suite: %s", args[0]) + } + + specs, err := ext.GetSpecs().Filter(suite.Qualifiers) + if err != nil { + return errors.Wrap(err, "couldn't filter specs") + } + + return specs.Run(w, opts.concurrencyFlags.MaxConcurency) + }, + } + opts.componentFlags.BindFlags(cmd.Flags()) + opts.outputFlags.BindFlags(cmd.Flags()) + opts.concurrencyFlags.BindFlags(cmd.Flags()) + + return cmd +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go new file mode 100644 index 0000000000000..ea4b62cb6bfba --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go @@ -0,0 +1,81 @@ +package cmdrun + +import ( + "bufio" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewRunTestCommand(registry *extension.Registry) *cobra.Command { + opts := struct { + componentFlags *flags.ComponentFlags + concurrencyFlags *flags.ConcurrencyFlags + nameFlags *flags.NamesFlags + outputFlags *flags.OutputFlags + }{ + componentFlags: flags.NewComponentFlags(), + nameFlags: flags.NewNamesFlags(), + outputFlags: flags.NewOutputFlags(), + concurrencyFlags: flags.NewConcurrencyFlags(), + } + + cmd := &cobra.Command{ + Use: "run-test [-n NAME...] [NAME]", + Short: "Runs tests by name", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + ext := registry.Get(opts.componentFlags.Component) + if ext == nil { + return fmt.Errorf("component not found: %s", opts.componentFlags.Component) + } + if len(args) > 1 { + return fmt.Errorf("use --names to specify more than one test") + } + opts.nameFlags.Names = append(opts.nameFlags.Names, args...) + + // allow reading tests from an stdin pipe + info, err := os.Stdin.Stat() + if err != nil { + return err + } + if info.Mode()&os.ModeCharDevice == 0 { // Check if input is from a pipe + scanner := bufio.NewScanner(os.Stdin) + for scanner.Scan() { + opts.nameFlags.Names = append(opts.nameFlags.Names, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading from stdin: %v", err) + } + } + + if len(opts.nameFlags.Names) == 0 { + return fmt.Errorf("must specify at least one test") + } + + specs, err := ext.FindSpecsByName(opts.nameFlags.Names...) 
+			if err != nil {
+				return err
+			}
+
+			w, err := extensiontests.NewResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output))
+			if err != nil {
+				return err
+			}
+			defer w.Flush()
+
+			return specs.Run(w, opts.concurrencyFlags.MaxConcurency)
+		},
+	}
+	opts.componentFlags.BindFlags(cmd.Flags())
+	opts.nameFlags.BindFlags(cmd.Flags())
+	opts.outputFlags.BindFlags(cmd.Flags())
+	opts.concurrencyFlags.BindFlags(cmd.Flags())
+
+	return cmd
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate/update.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate/update.go
new file mode 100644
index 0000000000000..5d847308e59e9
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate/update.go
@@ -0,0 +1,84 @@
+package cmdupdate
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+const metadataDirectory = ".openshift-tests-extension"
+
+// NewUpdateCommand adds an "update" command used to generate and verify the metadata we keep track of. This should
+// be a black box to end users, i.e. we can add more criteria later that they'll consume when revendoring. For now,
+// we prevent a test from being renamed without updating its other names, or from being deleted.
+func NewUpdateCommand(registry *extension.Registry) *cobra.Command {
+	componentFlags := flags.NewComponentFlags()
+
+	cmd := &cobra.Command{
+		Use:          "update",
+		Short:        "Update test metadata",
+		SilenceUsage: true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ext := registry.Get(componentFlags.Component)
+			if ext == nil {
+				return fmt.Errorf("couldn't find the component %q", componentFlags.Component)
+			}
+
+			// Create the metadata directory if it doesn't exist
+			if err := os.MkdirAll(metadataDirectory, 0755); err != nil {
+				return fmt.Errorf("failed to create directory %s: %w", metadataDirectory, err)
+			}
+
+			// Read existing specs
+			metadataPath := filepath.Join(metadataDirectory, fmt.Sprintf("%s.json", strings.ReplaceAll(ext.Component.Identifier(), ":", "_")))
+			var oldSpecs extensiontests.ExtensionTestSpecs
+			source, err := os.Open(metadataPath)
+			if err != nil {
+				if !os.IsNotExist(err) {
+					return fmt.Errorf("failed to open file: %s: %+w", metadataPath, err)
+				}
+			} else {
+				if err := json.NewDecoder(source).Decode(&oldSpecs); err != nil {
+					return fmt.Errorf("failed to decode file: %s: %+w", metadataPath, err)
+				}
+
+				missing, err := ext.FindRemovedTestsWithoutRename(oldSpecs)
+				if err != nil && len(missing) > 0 {
+					fmt.Fprintf(os.Stderr, "Missing Tests:\n")
+					for _, name := range missing {
+						fmt.Fprintf(os.Stderr, " * %s\n", name)
+					}
+					fmt.Fprintf(os.Stderr, "\n")
+
+					return fmt.Errorf("missing tests; if you've renamed tests you must add their names to OriginalName, " +
+						"or mark them obsolete")
+				}
+			}
+
+			// no missing tests, write the results
+			newSpecs := ext.GetSpecs()
+			data, err := json.MarshalIndent(newSpecs, "", "  ")
+			if err != nil {
+				return fmt.Errorf("failed to marshal specs to JSON: %w", err)
+			}
+
+			// Write the JSON data to the file
+			if err := os.WriteFile(metadataPath, data, 0644); err != nil {
+				return fmt.Errorf("failed to write file %s: %w", metadataPath, err)
+			}
+
+			fmt.Printf("successfully updated metadata\n")
+			return nil
+		},
+	}
+	componentFlags.BindFlags(cmd.Flags())
+	return cmd
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
new file mode 100644
index 0000000000000..b7651ba0220dd
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
@@ -0,0 +1,26 @@
+package dbtime
+
+import "time"
+
+// DBTime is a type suitable for direct import into databases like BigQuery,
+// formatted like 2006-01-02 15:04:05.000000 UTC.
+type DBTime time.Time
+
+func Ptr(t time.Time) *DBTime {
+	return (*DBTime)(&t)
+}
+
+func (dbt *DBTime) MarshalJSON() ([]byte, error) {
+	formattedTime := time.Time(*dbt).Format(`"2006-01-02 15:04:05.000000 UTC"`)
+	return []byte(formattedTime), nil
+}
+
+func (dbt *DBTime) UnmarshalJSON(b []byte) error {
+	timeStr := string(b[1 : len(b)-1])
+	parsedTime, err := time.Parse("2006-01-02 15:04:05.000000 UTC", timeStr)
+	if err != nil {
+		return err
+	}
+	*dbt = (DBTime)(parsedTime)
+	return nil
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
new file mode 100644
index 0000000000000..747871ec1c272
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
@@ -0,0 +1,155 @@
+package extension
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/version"
+)
+
+func NewExtension(product, kind, name string) *Extension {
+	return &Extension{
+		APIVersion: CurrentExtensionAPIVersion,
+		Source: Source{
+			Commit:       version.CommitFromGit,
+			BuildDate:    version.BuildDate,
+			GitTreeState: version.GitTreeState,
+		},
+		Component: Component{
+			Product: product,
+			Kind:    kind,
+			Name:    name,
+		},
+	}
+}
+
+func (e *Extension) GetSuite(name string) (*Suite, error) {
+	var suite *Suite
+
+	for _, s := range e.Suites {
+		if s.Name == name {
+			suite = &s
+			break
+		}
+	}
+
+	if suite == nil {
+		return nil, fmt.Errorf("no such suite: %s", name)
+	}
+
+	return suite, nil
+}
+
+func (e *Extension) GetSpecs() et.ExtensionTestSpecs {
+	return e.specs
+}
+
+func (e *Extension) AddSpecs(specs et.ExtensionTestSpecs) {
+	specs.Walk(func(spec *et.ExtensionTestSpec) {
+		spec.Source = e.Component.Identifier()
+	})
+
+	e.specs = append(e.specs, specs...)
+}
+
+// IgnoreObsoleteTests allows removal of a test.
+func (e *Extension) IgnoreObsoleteTests(testNames ...string) {
+	if e.obsoleteTests == nil {
+		e.obsoleteTests = sets.New[string](testNames...)
+	} else {
+		e.obsoleteTests.Insert(testNames...)
+	}
+}
+
+// FindRemovedTestsWithoutRename compares the current set of test specs against oldSpecs,
+// taking original names into account, and returns the names of any old tests that can no
+// longer be found, along with an error. It can be used to detect tests that were removed
+// or renamed without preserving their original name.
+func (e *Extension) FindRemovedTestsWithoutRename(oldSpecs et.ExtensionTestSpecs) ([]string, error) {
+	currentSpecs := e.GetSpecs()
+	currentMap := make(map[string]bool)
+
+	// Populate current specs into a map for quick lookup by both Name and OriginalName.
+	for _, spec := range currentSpecs {
+		currentMap[spec.Name] = true
+		if spec.OriginalName != "" {
+			currentMap[spec.OriginalName] = true
+		}
+	}
+
+	var removedTests []string
+
+	// Check oldSpecs against current specs.
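+	// For example (illustrative): a spec renamed from "old test name" to
+	// "new test name" is not flagged as removed, as long as the renamed spec
+	// records "old test name" in its OriginalName.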
+	for _, oldSpec := range oldSpecs {
+		// Skip if the test is marked as obsolete.
+		if e.obsoleteTests.Has(oldSpec.Name) {
+			continue
+		}
+
+		// Check if oldSpec is missing in currentSpecs by both Name and OriginalName.
+		if !currentMap[oldSpec.Name] && (oldSpec.OriginalName == "" || !currentMap[oldSpec.OriginalName]) {
+			removedTests = append(removedTests, oldSpec.Name)
+		}
+	}
+
+	// Return error if any removed tests were found.
+	if len(removedTests) > 0 {
+		return removedTests, fmt.Errorf("tests removed without rename: %v", removedTests)
+	}
+
+	return nil, nil
+}
+
+// AddGlobalSuite adds a suite whose qualifiers will apply to all tests,
+// not just this one, allowing a developer to create a composed suite of
+// tests from many sources.
+func (e *Extension) AddGlobalSuite(suite Suite) *Extension {
+	if e.Suites == nil {
+		e.Suites = []Suite{suite}
+	} else {
+		e.Suites = append(e.Suites, suite)
+	}
+
+	return e
+}
+
+// AddSuite adds a suite whose qualifiers will only apply to tests present
+// in its own extension.
+func (e *Extension) AddSuite(suite Suite) *Extension {
+	expr := fmt.Sprintf("source == %q", e.Component.Identifier())
+	for i := range suite.Qualifiers {
+		suite.Qualifiers[i] = fmt.Sprintf("(%s) && (%s)", expr, suite.Qualifiers[i])
+	}
+	e.AddGlobalSuite(suite)
+	return e
+}
+
+func (e *Extension) FindSpecsByName(names ...string) (et.ExtensionTestSpecs, error) {
+	var specs et.ExtensionTestSpecs
+	var notFound []string
+
+	for _, name := range names {
+		found := false
+		for i := range e.specs {
+			if e.specs[i].Name == name {
+				specs = append(specs, e.specs[i])
+				found = true
+				break
+			}
+		}
+		if !found {
+			notFound = append(notFound, name)
+		}
+	}
+
+	if len(notFound) > 0 {
+		return nil, fmt.Errorf("no such tests: %s", strings.Join(notFound, ", "))
+	}
+
+	return specs, nil
+}
+
+func (e *Component) Identifier() string {
+	return fmt.Sprintf("%s:%s:%s", e.Product, e.Kind, e.Name)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
new file mode 100644
index 0000000000000..f33fb5c2745b2
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
@@ -0,0 +1,12 @@
+package extensiontests
+
+func (results ExtensionTestResults) Walk(walkFn func(*ExtensionTestResult)) {
+	for i := range results {
+		walkFn(results[i])
+	}
+}
+
+// AddDetails adds additional information to an ExtensionTestResult. Value must marshal to JSON.
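+//
+// A minimal sketch of usage (the detail name and value here are illustrative):
+//
+//	res.AddDetails("skip-reason", map[string]string{"platform": "aws"})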
+func (result *ExtensionTestResult) AddDetails(name string, value interface{}) {
+	result.Details = append(result.Details, Details{Name: name, Value: value})
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
new file mode 100644
index 0000000000000..821599791352c
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
@@ -0,0 +1,71 @@
+package extensiontests
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+type ResultWriter interface {
+	Write(result *ExtensionTestResult)
+	Flush()
+}
+
+type NullResultWriter struct{}
+
+func (NullResultWriter) Write(*ExtensionTestResult) {}
+func (NullResultWriter) Flush()                     {}
+
+type ResultFormat string
+
+var (
+	JSON  ResultFormat = "json"
+	JSONL ResultFormat = "jsonl"
+)
+
+type JSONResultWriter struct {
+	out     io.Writer
+	format  ResultFormat
+	results ExtensionTestResults
+}
+
+func NewResultWriter(out io.Writer, format ResultFormat) (*JSONResultWriter, error) {
+	switch format {
+	case JSON, JSONL:
+		// do nothing
+	default:
+		return nil, fmt.Errorf("unsupported result format: %s", format)
+	}
+
+	return &JSONResultWriter{
+		out:    out,
+		format: format,
+	}, nil
+}
+
+func (w *JSONResultWriter) Write(result *ExtensionTestResult) {
+	switch w.format {
+	case JSONL:
+		// JSONL gets written to out as we get the items
+		data, err := json.Marshal(result)
+		if err != nil {
+			panic(err)
+		}
+		fmt.Fprintf(w.out, "%s\n", string(data))
+	case JSON:
+		w.results = append(w.results, result)
+	}
+}
+
+func (w *JSONResultWriter) Flush() {
+	switch w.format {
+	case JSONL:
+		// we already wrote it out
+	case JSON:
+		data, err := json.MarshalIndent(w.results, "", "  ")
+		if err != nil {
+			panic(err)
+		}
+		fmt.Fprintf(w.out, "%s\n", string(data))
+	}
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
new file mode 100644
index 0000000000000..66c2a4030bf91
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
@@ -0,0 +1,278 @@
+package extensiontests
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/dbtime"
+)
+
+// Walk iterates over all test specs and executes the function provided. The test spec can be mutated.
+func (specs ExtensionTestSpecs) Walk(walkFn func(*ExtensionTestSpec)) ExtensionTestSpecs {
+	for i := range specs {
+		walkFn(specs[i])
+	}
+
+	return specs
+}
+
+func (specs ExtensionTestSpecs) Names() []string {
+	var names []string
+	for _, spec := range specs {
+		names = append(names, spec.Name)
+	}
+	return names
+}
+
+// Run executes all the specs in parallel, up to maxConcurrent at the same time. Results
+// are written to the given ResultWriter after each spec has completed execution. BeforeEach,
+// BeforeAll, AfterEach, AfterAll hooks are executed when specified. "Each" hooks must be thread
+// safe. Returns an error if any test spec failed, indicating the quantity of failures.
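+//
+// A minimal sketch of driving Run with the JSONL writer defined above
+// (the concurrency of 4 is illustrative; assumes os is imported):
+//
+//	w, err := NewResultWriter(os.Stdout, JSONL)
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Flush()
+//	return specs.Run(w, 4)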
+func (specs ExtensionTestSpecs) Run(w ResultWriter, maxConcurrent int) error {
+	queue := make(chan *ExtensionTestSpec)
+	failures := atomic.Int64{}
+
+	// Execute beforeAll
+	for _, spec := range specs {
+		for _, beforeAllTask := range spec.beforeAll {
+			beforeAllTask.Run()
+		}
+	}
+
+	// Feed the queue
+	go func() {
+		specs.Walk(func(spec *ExtensionTestSpec) {
+			queue <- spec
+		})
+		close(queue)
+	}()
+
+	// Start consumers
+	var wg sync.WaitGroup
+	for i := 0; i < maxConcurrent; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for spec := range queue {
+				for _, beforeEachTask := range spec.beforeEach {
+					beforeEachTask.Run(*spec)
+				}
+
+				res := runSpec(spec)
+				if res.Result == ResultFailed {
+					failures.Add(1)
+				}
+
+				for _, afterEachTask := range spec.afterEach {
+					afterEachTask.Run(res)
+				}
+
+				// We can't assume the runner will set the name of a test; it may not know it. Even if
+				// it does, we may want to modify it (e.g. k8s-tests for annotations currently).
+				res.Name = spec.Name
+				w.Write(res)
+			}
+		}()
+	}
+
+	// Wait for all consumers to finish
+	wg.Wait()
+
+	// Execute afterAll
+	for _, spec := range specs {
+		for _, afterAllTask := range spec.afterAll {
+			afterAllTask.Run()
+		}
+	}
+
+	failCount := failures.Load()
+	if failCount > 0 {
+		return fmt.Errorf("%d tests failed", failCount)
+	}
+	return nil
+}
+
+// AddBeforeAll adds a function to be run once before all tests start executing.
+func (specs ExtensionTestSpecs) AddBeforeAll(fn func()) {
+	task := &OneTimeTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.beforeAll = append(spec.beforeAll, task)
+	})
+}
+
+// AddAfterAll adds a function to be run once after all tests have finished.
+func (specs ExtensionTestSpecs) AddAfterAll(fn func()) {
+	task := &OneTimeTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.afterAll = append(spec.afterAll, task)
+	})
+}
+
+// AddBeforeEach adds a function that runs before each test starts executing. The ExtensionTestSpec is
+// passed in for contextual information, but must not be modified. The provided function must be thread
+// safe.
+func (specs ExtensionTestSpecs) AddBeforeEach(fn func(spec ExtensionTestSpec)) {
+	task := &SpecTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.beforeEach = append(spec.beforeEach, task)
+	})
+}
+
+// AddAfterEach adds a function that runs after each test has finished executing. The ExtensionTestResult
+// can be modified if needed. The provided function must be thread safe.
+func (specs ExtensionTestSpecs) AddAfterEach(fn func(task *ExtensionTestResult)) {
+	task := &TestResultTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.afterEach = append(spec.afterEach, task)
+	})
+}
+
+// MustFilter filters specs using the given celExprs. The celExprs are OR'd together; if any
+// matches, the spec is included in the filtered set. If a CEL expression is invalid or filtering
+// otherwise fails, this function panics.
+func (specs ExtensionTestSpecs) MustFilter(celExprs []string) ExtensionTestSpecs {
+	specs, err := specs.Filter(celExprs)
+	if err != nil {
+		panic(fmt.Sprintf("filter did not succeed: %s", err.Error()))
+	}
+
+	return specs
+}
+
+// Filter filters specs using the given celExprs. The celExprs are OR'd together; if any
+// matches, the spec is included in the filtered set.
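+//
+// Example qualifiers (values are illustrative; the variables available to
+// expressions are the ones declared in the CEL environment below):
+//
+//	name.contains("[sig-testing]")
+//	"SLOW" in labels
+//	source == "my-product:payload:my-component"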
+func (specs ExtensionTestSpecs) Filter(celExprs []string) (ExtensionTestSpecs, error) { + var filteredSpecs ExtensionTestSpecs + + // Empty filters returns all + if len(celExprs) == 0 { + return specs, nil + } + + env, err := cel.NewEnv( + cel.Declarations( + decls.NewVar("source", decls.String), + decls.NewVar("name", decls.String), + decls.NewVar("originalName", decls.String), + decls.NewVar("labels", decls.NewListType(decls.String)), + decls.NewVar("tags", decls.NewMapType(decls.String, decls.String)), + ), + ) + if err != nil { + return nil, fmt.Errorf("failed to create CEL environment: %w", err) + } + + // OR all expressions together + for _, spec := range specs { + include := false + for _, celExpr := range celExprs { + // Parse CEL expression + ast, iss := env.Parse(celExpr) + if iss.Err() != nil { + return nil, fmt.Errorf("error parsing CEL expression '%s': %v", celExpr, iss.Err()) + } + + // Check the AST + checked, iss := env.Check(ast) + if iss.Err() != nil { + return nil, fmt.Errorf("error checking CEL expression '%s': %v", celExpr, iss.Err()) + } + + // Create a CEL program from the checked AST + prg, err := env.Program(checked) + if err != nil { + return nil, fmt.Errorf("error creating CEL program: %v", err) + } + + out, _, err := prg.Eval(map[string]interface{}{ + "name": spec.Name, + "source": spec.Source, + "originalName": spec.OriginalName, + "labels": spec.Labels.UnsortedList(), + "tags": spec.Tags, + }) + if err != nil { + return nil, fmt.Errorf("error evaluating CEL expression: %v", err) + } + + // If any CEL expression evaluates to true, include the TestSpec + if out == types.True { + include = true + break + } + } + if include { + filteredSpecs = append(filteredSpecs, spec) + } + } + + return filteredSpecs, nil +} + +// AddLabel adds the labels to each spec. +func (specs ExtensionTestSpecs) AddLabel(labels ...string) ExtensionTestSpecs { + for i := range specs { + specs[i].Labels.Insert(labels...) + } + + return specs +} + +// RemoveLabel removes the labels from each spec. +func (specs ExtensionTestSpecs) RemoveLabel(labels ...string) ExtensionTestSpecs { + for i := range specs { + specs[i].Labels.Delete(labels...) + } + + return specs +} + +// SetTag specifies a key/value pair for each spec. +func (specs ExtensionTestSpecs) SetTag(key, value string) ExtensionTestSpecs { + for i := range specs { + specs[i].Tags[key] = value + } + + return specs +} + +// UnsetTag removes the specified key from each spec. 
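+//
+// Illustrative round trip with SetTag above:
+//
+//	specs.SetTag("owner", "sig-example")
+//	specs.UnsetTag("owner")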
+func (specs ExtensionTestSpecs) UnsetTag(key string) ExtensionTestSpecs { + for i := range specs { + delete(specs[i].Tags, key) + } + + return specs +} + +func runSpec(spec *ExtensionTestSpec) *ExtensionTestResult { + startTime := time.Now().UTC() + res := spec.Run() + duration := time.Since(startTime) + endTime := startTime.Add(duration).UTC() + if res == nil { + // this shouldn't happen + panic(fmt.Sprintf("test produced no result: %s", spec.Name)) + } + + res.Lifecycle = spec.Lifecycle + + // If the runner doesn't populate this info, we should set it + if res.StartTime == nil { + res.StartTime = dbtime.Ptr(startTime) + } + if res.EndTime == nil { + res.EndTime = dbtime.Ptr(endTime) + } + if res.Duration == 0 { + res.Duration = duration.Milliseconds() + } + + return res +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go new file mode 100644 index 0000000000000..e808bea87bb8b --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go @@ -0,0 +1,31 @@ +package extensiontests + +import "sync/atomic" + +type SpecTask struct { + fn func(spec ExtensionTestSpec) +} + +func (t *SpecTask) Run(spec ExtensionTestSpec) { + t.fn(spec) +} + +type TestResultTask struct { + fn func(result *ExtensionTestResult) +} + +func (t *TestResultTask) Run(result *ExtensionTestResult) { + t.fn(result) +} + +type OneTimeTask struct { + fn func() + executed int32 // Atomic boolean to indicate whether the function has been run +} + +func (t *OneTimeTask) Run() { + // Ensure one-time tasks are only run once + if atomic.CompareAndSwapInt32(&t.executed, 0, 1) { + t.fn() + } +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go new file mode 100644 index 0000000000000..2d507d6feeef8 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go @@ -0,0 +1,90 @@ +package extensiontests + +import ( + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/openshift-eng/openshift-tests-extension/pkg/dbtime" +) + +type Lifecycle string + +var LifecycleInforming Lifecycle = "informing" +var LifecycleBlocking Lifecycle = "blocking" + +type ExtensionTestSpecs []*ExtensionTestSpec + +type ExtensionTestSpec struct { + Name string `json:"name"` + + // OriginalName contains the very first name this test was ever known as, used to preserve + // history across all names. + OriginalName string `json:"originalName,omitempty"` + + // Labels are single string values to apply to the test spec + Labels sets.Set[string] `json:"labels"` + + // Tags are key:value pairs + Tags map[string]string `json:"tags,omitempty"` + + // Resources gives optional information about what's required to run this test. + Resources Resources `json:"resources"` + + // Source is the origin of the test. + Source string `json:"source"` + + // Lifecycle informs the executor whether the test is informing only, and should not cause the + // overall job run to fail, or if it's blocking where a failure of the test is fatal. + // Informing lifecycle tests can be used temporarily to gather information about a test's stability. + // Tests must not remain informing forever. 
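+	//
+	// For example (illustrative), a ginkgo label of "Lifecycle:informing"
+	// maps to LifecycleInforming via GetLifecycle in pkg/ginkgo, elsewhere
+	// in this change.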
+	Lifecycle Lifecycle `json:"lifecycle"`
+
+	// Run invokes a test
+	Run func() *ExtensionTestResult `json:"-"`
+
+	// Hook functions
+	afterAll   []*OneTimeTask
+	beforeAll  []*OneTimeTask
+	afterEach  []*TestResultTask
+	beforeEach []*SpecTask
+}
+
+type Resources struct {
+	Isolation Isolation `json:"isolation"`
+	Memory    string    `json:"memory,omitempty"`
+	Duration  string    `json:"duration,omitempty"`
+	Timeout   string    `json:"timeout,omitempty"`
+}
+
+type Isolation struct {
+	Mode     string   `json:"mode,omitempty"`
+	Conflict []string `json:"conflict,omitempty"`
+}
+
+type ExtensionTestResults []*ExtensionTestResult
+
+type Result string
+
+var ResultPassed Result = "passed"
+var ResultSkipped Result = "skipped"
+var ResultFailed Result = "failed"
+
+type ExtensionTestResult struct {
+	Name      string         `json:"name"`
+	Lifecycle Lifecycle      `json:"lifecycle"`
+	Duration  int64          `json:"duration"`
+	StartTime *dbtime.DBTime `json:"startTime"`
+	EndTime   *dbtime.DBTime `json:"endTime"`
+	Result    Result         `json:"result"`
+	Output    string         `json:"output"`
+	Error     string         `json:"error,omitempty"`
+	Details   []Details      `json:"details,omitempty"`
+}
+
+// Details are human-readable messages to further explain skips, timeouts, etc.
+// They can also be used to provide contemporaneous information about failures
+// that may not be easily returned by must-gather. For larger artifacts (greater
+// than 10KB), write them to $EXTENSION_ARTIFACTS_DIR.
+type Details struct {
+	Name  string      `json:"name"`
+	Value interface{} `json:"value"`
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go
new file mode 100644
index 0000000000000..bbae421df7747
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go
@@ -0,0 +1,39 @@
+package extension
+
+const DefaultExtension = "default"
+
+type Registry struct {
+	extensions map[string]*Extension
+}
+
+func NewRegistry() *Registry {
+	var r Registry
+	return &r
+}
+
+func (r *Registry) Walk(walkFn func(*Extension)) {
+	for k := range r.extensions {
+		if k == DefaultExtension {
+			continue
+		}
+		walkFn(r.extensions[k])
+	}
+}
+
+func (r *Registry) Get(name string) *Extension {
+	return r.extensions[name]
+}
+
+func (r *Registry) Register(extension *Extension) {
+	if r.extensions == nil {
+		r.extensions = make(map[string]*Extension)
+		// first extension is default
+		r.extensions[DefaultExtension] = extension
+	}
+
+	r.extensions[extension.Component.Identifier()] = extension
+}
+
+func (r *Registry) Deregister(name string) {
+	delete(r.extensions, name)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go
new file mode 100644
index 0000000000000..2e840c9e88df9
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go
@@ -0,0 +1,55 @@
+package extension
+
+import (
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+const CurrentExtensionAPIVersion = "v1.0"
+
+// Extension represents an extension to openshift-tests.
+type Extension struct {
+	APIVersion string    `json:"apiVersion"`
+	Source     Source    `json:"source"`
+	Component  Component `json:"component"`
+
+	// Suites that the extension wants to advertise/participate in.
+ Suites []Suite `json:"suites"` + + // Private data + specs extensiontests.ExtensionTestSpecs + obsoleteTests sets.Set[string] +} + +// Source contains the details of the commit and source URL. +type Source struct { + // Commit from which this binary was compiled. + Commit string `json:"commit"` + // BuildDate ISO8601 string of when the binary was built + BuildDate string `json:"build_date"` + // GitTreeState lets you know the status of the git tree (clean/dirty) + GitTreeState string `json:"git_tree_state"` + // SourceURL contains the url of the git repository (if known) that this extension was built from. + SourceURL string `json:"source_url,omitempty"` +} + +// Component represents the component the binary acts on. +type Component struct { + // The product this component is part of. + Product string `json:"product"` + // The type of the component. + Kind string `json:"type"` + // The name of the component. + Name string `json:"name"` +} + +// Suite represents additional suites the extension wants to advertise. +type Suite struct { + // The name of the suite. + Name string `json:"name"` + // Parent suites this suite is part of. + Parents []string `json:"parents,omitempty"` + // Qualifiers are CEL expressions that are OR'd together for test selection that are members of the suite. + Qualifiers []string `json:"qualifiers,omitempty"` +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go new file mode 100644 index 0000000000000..55e91819ba9ef --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go @@ -0,0 +1,25 @@ +package flags + +import ( + "github.com/spf13/pflag" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" +) + +// ComponentFlags contains information for specifying the component. +type ComponentFlags struct { + Component string +} + +func NewComponentFlags() *ComponentFlags { + return &ComponentFlags{ + Component: extension.DefaultExtension, + } +} + +func (f *ComponentFlags) BindFlags(fs *pflag.FlagSet) { + fs.StringVar(&f.Component, + "component", + f.Component, + "specify the component to enable") +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go new file mode 100644 index 0000000000000..2db07c7654757 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go @@ -0,0 +1,23 @@ +package flags + +import "github.com/spf13/pflag" + +// ConcurrencyFlags contains information for configuring concurrency +type ConcurrencyFlags struct { + MaxConcurency int +} + +func NewConcurrencyFlags() *ConcurrencyFlags { + return &ConcurrencyFlags{ + MaxConcurency: 10, + } +} + +func (f *ConcurrencyFlags) BindFlags(fs *pflag.FlagSet) { + fs.IntVarP(&f.MaxConcurency, + "max-concurrency", + "c", + f.MaxConcurency, + "maximum number of tests to run in parallel", + ) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go new file mode 100644 index 0000000000000..b073f1a26b629 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go @@ -0,0 +1,24 @@ +package flags + +import ( + "github.com/spf13/pflag" +) + +// NamesFlags contains information for specifying multiple test names. 
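+//
+// Illustrative CLI usage (the binary name is hypothetical):
+//
+//	example-ext run-test -n "[sig-a] first test" -n "[sig-b] second test"
+//	example-ext list tests -o names | example-ext run-test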
+type NamesFlags struct {
+	Names []string
+}
+
+func NewNamesFlags() *NamesFlags {
+	return &NamesFlags{
+		Names: []string{},
+	}
+}
+
+func (f *NamesFlags) BindFlags(fs *pflag.FlagSet) {
+	fs.StringSliceVarP(&f.Names,
+		"names",
+		"n",
+		f.Names,
+		"specify test name (can be specified multiple times)")
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go
new file mode 100644
index 0000000000000..24f49f6387b16
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go
@@ -0,0 +1,95 @@
+package flags
+
+import (
+	"encoding/json"
+	"reflect"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/pflag"
+)
+
+// OutputFlags contains information for specifying the output format.
+type OutputFlags struct {
+	Output string
+}
+
+func NewOutputFlags() *OutputFlags {
+	return &OutputFlags{
+		Output: "json",
+	}
+}
+
+func (f *OutputFlags) BindFlags(fs *pflag.FlagSet) {
+	fs.StringVarP(&f.Output,
+		"output",
+		"o",
+		f.Output,
+		"output mode")
+}
+
+func (o *OutputFlags) Marshal(v interface{}) ([]byte, error) {
+	switch o.Output {
+	case "", "json":
+		j, err := json.MarshalIndent(&v, "", "  ")
+		if err != nil {
+			return nil, err
+		}
+		return j, nil
+	case "jsonl":
+		// Check if v is a slice or array
+		val := reflect.ValueOf(v)
+		if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+			var result []byte
+			for i := 0; i < val.Len(); i++ {
+				item := val.Index(i).Interface()
+				j, err := json.Marshal(item)
+				if err != nil {
+					return nil, err
+				}
+				result = append(result, j...)
+				result = append(result, '\n') // Append newline after each item
+			}
+			return result, nil
+		}
+		return nil, errors.New("jsonl format requires a slice or array")
+	case "names":
+		val := reflect.ValueOf(v)
+		if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+			var names []string
+		outerLoop:
+			for i := 0; i < val.Len(); i++ {
+				item := val.Index(i)
+				// Check for Name() or Identifier() methods
+				itemInterface := item.Interface()
+				nameFuncs := []string{"Name", "Identifier"}
+				for _, fn := range nameFuncs {
+					method := reflect.ValueOf(itemInterface).MethodByName(fn)
+					if method.IsValid() && method.Kind() == reflect.Func && method.Type().NumIn() == 0 && method.Type().NumOut() == 1 && method.Type().Out(0).Kind() == reflect.String {
+						name := method.Call(nil)[0].String()
+						names = append(names, name)
+						continue outerLoop
+					}
+				}
+
+				// Dereference pointer if needed
+				if item.Kind() == reflect.Ptr {
+					item = item.Elem()
+				}
+				// Check for struct with Name field
+				if item.Kind() == reflect.Struct {
+					nameField := item.FieldByName("Name")
+					if nameField.IsValid() && nameField.Kind() == reflect.String {
+						names = append(names, nameField.String())
+					}
+				} else {
+					return nil, errors.New("items must have a Name field or a Name() method")
+				}
+			}
+			return []byte(strings.Join(names, "\n")), nil
+		}
+		return nil, errors.New("names format requires an array of structs")
+	default:
+		return nil, errors.Errorf("invalid output format: %s", o.Output)
+	}
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go
new file mode 100644
index 0000000000000..23de832a850ab
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go
@@ -0,0 +1,21 @@
+package flags
+
+import (
+	"github.com/spf13/pflag"
+)
+
+// SuiteFlags
contains information for specifying the suite. +type SuiteFlags struct { + Suite string +} + +func NewSuiteFlags() *SuiteFlags { + return &SuiteFlags{} +} + +func (f *SuiteFlags) BindFlags(fs *pflag.FlagSet) { + fs.StringVar(&f.Suite, + "suite", + f.Suite, + "specify the suite to use") +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go new file mode 100644 index 0000000000000..9faea67f849f9 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go @@ -0,0 +1,153 @@ +package ginkgo + +import ( + "fmt" + "os" + "strings" + "sync" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/sets" + + ext "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" +) + +func configureGinkgo() (*types.SuiteConfig, *types.ReporterConfig, error) { + if !ginkgo.GetSuite().InPhaseBuildTree() { + if err := ginkgo.GetSuite().BuildTree(); err != nil { + return nil, nil, errors.Wrapf(err, "couldn't build ginkgo tree") + } + } + + // Ginkgo initialization + ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes() + suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration() + suiteConfig.RandomizeAllSpecs = true + suiteConfig.Timeout = 24 * time.Hour + reporterConfig.NoColor = true + reporterConfig.Verbose = true + ginkgo.SetReporterConfig(reporterConfig) + + // Write output to Stderr + ginkgo.GinkgoWriter = ginkgo.NewWriter(os.Stderr) + + gomega.RegisterFailHandler(ginkgo.Fail) + + return &suiteConfig, &reporterConfig, nil +} + +func BuildExtensionTestSpecsFromOpenShiftGinkgoSuite() (ext.ExtensionTestSpecs, error) { + var tests []*ext.ExtensionTestSpec + var enforceSerialExecutionForGinkgo sync.Mutex // in-process parallelization for ginkgo is impossible so far + + if _, _, err := configureGinkgo(); err != nil { + return nil, err + } + + cwd, err := os.Getwd() + if err != nil { + return nil, errors.Wrap(err, "couldn't get current working directory") + } + + ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) { + testCase := &ext.ExtensionTestSpec{ + Name: spec.Text(), + Labels: sets.New[string](spec.Labels()...), + Lifecycle: GetLifecycle(spec.Labels()), + Run: func() *ext.ExtensionTestResult { + enforceSerialExecutionForGinkgo.Lock() + defer enforceSerialExecutionForGinkgo.Unlock() + + suiteConfig, reporterConfig, _ := configureGinkgo() + + result := &ext.ExtensionTestResult{ + Name: spec.Text(), + } + + var summary types.SpecReport + ginkgo.GetSuite().RunSpec(spec, ginkgo.Labels{}, "", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), *suiteConfig, + *reporterConfig) + for _, report := range ginkgo.GetSuite().GetReport().SpecReports { + if report.NumAttempts > 0 { + summary = report + } + } + + result.Output = summary.CapturedGinkgoWriterOutput + result.Error = summary.CapturedStdOutErr + + switch { + case summary.State == types.SpecStatePassed: + result.Result = ext.ResultPassed + case summary.State == types.SpecStateSkipped: + result.Result = ext.ResultSkipped + case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted: + result.Result = ext.ResultFailed + var errors []string + if len(summary.Failure.ForwardedPanic) > 0 { + if len(summary.Failure.Location.FullStackTrace) > 0 { + errors = append(errors, fmt.Sprintf("\n%s\n", 
summary.Failure.Location.FullStackTrace)) + } + errors = append(errors, fmt.Sprintf("fail [%s:%d]: Test Panicked: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic)) + } + errors = append(errors, fmt.Sprintf("fail [%s:%d]: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message)) + result.Error = strings.Join(errors, "\n") + default: + panic(fmt.Sprintf("test produced unknown outcome: %#v", summary)) + } + + return result + }, + } + tests = append(tests, testCase) + }) + + return tests, nil +} + +func Informing() ginkgo.Labels { + return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleInforming)) +} + +func Slow() ginkgo.Labels { + return ginkgo.Label("SLOW") +} + +func Blocking() ginkgo.Labels { + return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleBlocking)) +} + +func GetLifecycle(labels ginkgo.Labels) ext.Lifecycle { + for _, label := range labels { + res := strings.Split(label, ":") + if len(res) != 2 || !strings.EqualFold(res[0], "lifecycle") { + continue + } + return MustLifecycle(res[1]) // this panics if unsupported lifecycle is used + } + + return ext.LifecycleBlocking +} + +func MustLifecycle(l string) ext.Lifecycle { + switch ext.Lifecycle(l) { + case ext.LifecycleInforming, ext.LifecycleBlocking: + return ext.Lifecycle(l) + default: + panic(fmt.Sprintf("unknown test lifecycle: %s", l)) + } +} + +func lastFilenameSegment(filename string) string { + if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 { + return parts[len(parts)-1] + } + if parts := strings.Split(filename, "/src/"); len(parts) > 1 { + return parts[len(parts)-1] + } + return filename +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go new file mode 100644 index 0000000000000..7d6a3309b312b --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go @@ -0,0 +1,11 @@ +package version + +var ( + // CommitFromGit is a constant representing the source version that + // generated this build. It should be set during build via -ldflags. + CommitFromGit string + // BuildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + BuildDate string + // GitTreeState has the state of git tree, either "clean" or "dirty" + GitTreeState string +) diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 0000000000000..5c389317ecc6d --- /dev/null +++ b/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
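The flags helpers vendored earlier in this change (NamesFlags, OutputFlags, SuiteFlags) are meant to be bound to a single pflag.FlagSet and used together; in particular, OutputFlags.Marshal switches on the selected mode ("json", "jsonl", or "names") and uses reflection for the latter two. The following is a minimal usage sketch, not part of the vendored tree: the testInfo type and the command wiring are illustrative assumptions, while the flags package API is taken from the vendored files above.

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"github.com/openshift-eng/openshift-tests-extension/pkg/flags"
)

// testInfo is a hypothetical result type; any struct with a Name field
// (or a Name()/Identifier() method) works with the "names" mode.
type testInfo struct {
	Name string `json:"name"`
}

func main() {
	out := flags.NewOutputFlags() // defaults to "json"

	fs := pflag.NewFlagSet("list", pflag.ContinueOnError)
	out.BindFlags(fs)
	if err := fs.Parse([]string{"--output", "names"}); err != nil {
		panic(err)
	}

	results := []testInfo{
		{Name: "[sig-example] first test"},
		{Name: "[sig-example] second test"},
	}

	// "json" pretty-prints the whole slice, "jsonl" emits one JSON
	// object per line, and "names" reflects over the slice elements
	// and prints one Name per line.
	b, err := out.Marshal(results)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}

With --output names, Marshal locates a Name field (or a parameterless string-returning Name()/Identifier() method) on each element, which is why the extension test specs above expose Name as a plain struct field.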
diff --git a/vendor/github.com/openshift/api/apiserver/v1/Makefile b/vendor/github.com/openshift/api/apiserver/v1/Makefile new file mode 100644 index 0000000000000..a2d1fa49bef40 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="apiserver.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/apiserver/v1/doc.go b/vendor/github.com/openshift/api/apiserver/v1/doc.go new file mode 100644 index 0000000000000..cc6a8aa617ccc --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=apiserver.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/apiserver/v1/register.go b/vendor/github.com/openshift/api/apiserver/v1/register.go new file mode 100644 index 0000000000000..9d6e126e40e8c --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/register.go @@ -0,0 +1,38 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "apiserver.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &APIRequestCount{}, + &APIRequestCountList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go new file mode 100644 index 0000000000000..645d796f7759a --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go @@ -0,0 +1,177 @@ +// Package v1 is an api version in the apiserver.openshift.io group +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // RemovedInReleaseLabel is a label which can be used to select APIRequestCounts based on the release + // in which they are removed. The value is equivalent to .status.removedInRelease. + RemovedInReleaseLabel = "apirequestcounts.apiserver.openshift.io/removedInRelease" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced +// +openshift:compatibility-gen:level=1 + +// APIRequestCount tracks requests made to an API. The instance name must +// be of the form `resource.version.group`, matching the resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=apirequestcounts,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/897
+// +openshift:file-pattern=operatorName=kube-apiserver
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+// +kubebuilder:printcolumn:name=RemovedInRelease,JSONPath=.status.removedInRelease,type=string,description=Release in which an API will be removed.
+// +kubebuilder:printcolumn:name=RequestsInCurrentHour,JSONPath=.status.currentHour.requestCount,type=integer,description=Number of requests in the current hour.
+// +kubebuilder:printcolumn:name=RequestsInLast24h,JSONPath=.status.requestCount,type=integer,description=Number of requests in the last 24h.
+type APIRequestCount struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec defines the characteristics of the resource.
+	// +required
+	Spec APIRequestCountSpec `json:"spec"`
+
+	// status contains the observed state of the resource.
+	Status APIRequestCountStatus `json:"status,omitempty"`
+}
+
+type APIRequestCountSpec struct {
+
+	// numberOfUsersToReport is the number of users to include in the report.
+	// If unspecified or zero, the default is ten. This default is subject to change.
+	// +kubebuilder:default:=10
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=100
+	// +optional
+	NumberOfUsersToReport int64 `json:"numberOfUsersToReport"`
+}
+
+// +k8s:deepcopy-gen=true
+type APIRequestCountStatus struct {
+
+	// conditions contains details of the current status of this API Resource.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"`
+
+	// removedInRelease is when the API will be removed.
+	// +kubebuilder:validation:MinLength=0
+	// +kubebuilder:validation:Pattern=^[0-9][0-9]*\.[0-9][0-9]*$
+	// +kubebuilder:validation:MaxLength=64
+	// +optional
+	RemovedInRelease string `json:"removedInRelease,omitempty"`
+
+	// requestCount is a sum of all requestCounts across all current hours, nodes, and users.
+	// +kubebuilder:validation:Minimum=0
+	// +required
+	RequestCount int64 `json:"requestCount"`
+
+	// currentHour contains request history for the current hour. This is porcelain to make the API
+	// easier for humans to read and to see whether they have addressed a problem. This field is reset on the hour.
+	// +optional
+	CurrentHour PerResourceAPIRequestLog `json:"currentHour"`
+
+	// last24h contains request history for the last 24 hours, indexed by the hour, so
+	// 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour
+	// is updated live and then duplicated into the currentHour field.
+	// +kubebuilder:validation:MaxItems=24
+	// +optional
+	Last24h []PerResourceAPIRequestLog `json:"last24h"`
+}
+
+// PerResourceAPIRequestLog logs requests for various nodes.
+type PerResourceAPIRequestLog struct {
+
+	// byNode contains logs of requests per node.
+	// +kubebuilder:validation:MaxItems=512
+	// +optional
+	ByNode []PerNodeAPIRequestLog `json:"byNode"`
+
+	// requestCount is a sum of all requestCounts across nodes.
+	// +kubebuilder:validation:Minimum=0
+	// +required
+	RequestCount int64 `json:"requestCount"`
+}
+
+// PerNodeAPIRequestLog contains logs of requests to a certain node.
+type PerNodeAPIRequestLog struct {
+
+	// nodeName where the requests are being handled.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=512
+	// +required
+	NodeName string `json:"nodeName"`
+
+	// requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users.
+	// +kubebuilder:validation:Minimum=0
+	// +required
+	RequestCount int64 `json:"requestCount"`
+
+	// byUser contains request details by top .spec.numberOfUsersToReport users.
+	// Note that because, in the case of an apiserver restart, the list of top users is determined on a best-effort basis,
+	// the list might be imprecise.
+	// In addition, some system users may be explicitly included in the list.
+	// +kubebuilder:validation:MaxItems=500
+	ByUser []PerUserAPIRequestCount `json:"byUser"`
+}
+
+// PerUserAPIRequestCount contains logs of a user's requests.
+type PerUserAPIRequestCount struct {
+
+	// username that made the request.
+	// +kubebuilder:validation:MaxLength=512
+	UserName string `json:"username"`
+
+	// userAgent that made the request.
+	// The same user often has multiple binaries which connect (pods with many containers). The different binaries
+	// will have different userAgents, but the same user. In addition, we have userAgents with version information
+	// embedded and the userName isn't likely to change.
+	// +kubebuilder:validation:MaxLength=1024
+	UserAgent string `json:"userAgent"`
+
+	// requestCount of requests by the user across all verbs.
+	// +kubebuilder:validation:Minimum=0
+	// +required
+	RequestCount int64 `json:"requestCount"`
+
+	// byVerb details by verb.
+	// +kubebuilder:validation:MaxItems=10
+	ByVerb []PerVerbAPIRequestCount `json:"byVerb"`
+}
+
+// PerVerbAPIRequestCount counts requests by API request verb.
+type PerVerbAPIRequestCount struct {
+
+	// verb of API request (get, list, create, etc...)
+	// +kubebuilder:validation:MaxLength=20
+	// +required
+	Verb string `json:"verb"`
+
+	// requestCount of requests for verb.
+	// +kubebuilder:validation:Minimum=0
+	// +required
+	RequestCount int64 `json:"requestCount"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+
+// APIRequestCountList is a list of APIRequestCount resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+type APIRequestCountList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []APIRequestCount `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000..79be37153579f
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go
@@ -0,0 +1,202 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCount) DeepCopyInto(out *APIRequestCount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCount. +func (in *APIRequestCount) DeepCopy() *APIRequestCount { + if in == nil { + return nil + } + out := new(APIRequestCount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIRequestCount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCountList) DeepCopyInto(out *APIRequestCountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIRequestCount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountList. +func (in *APIRequestCountList) DeepCopy() *APIRequestCountList { + if in == nil { + return nil + } + out := new(APIRequestCountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIRequestCountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCountSpec) DeepCopyInto(out *APIRequestCountSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountSpec. +func (in *APIRequestCountSpec) DeepCopy() *APIRequestCountSpec { + if in == nil { + return nil + } + out := new(APIRequestCountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCountStatus) DeepCopyInto(out *APIRequestCountStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CurrentHour.DeepCopyInto(&out.CurrentHour) + if in.Last24h != nil { + in, out := &in.Last24h, &out.Last24h + *out = make([]PerResourceAPIRequestLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountStatus. +func (in *APIRequestCountStatus) DeepCopy() *APIRequestCountStatus { + if in == nil { + return nil + } + out := new(APIRequestCountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerNodeAPIRequestLog) DeepCopyInto(out *PerNodeAPIRequestLog) { + *out = *in + if in.ByUser != nil { + in, out := &in.ByUser, &out.ByUser + *out = make([]PerUserAPIRequestCount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerNodeAPIRequestLog. +func (in *PerNodeAPIRequestLog) DeepCopy() *PerNodeAPIRequestLog { + if in == nil { + return nil + } + out := new(PerNodeAPIRequestLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerResourceAPIRequestLog) DeepCopyInto(out *PerResourceAPIRequestLog) { + *out = *in + if in.ByNode != nil { + in, out := &in.ByNode, &out.ByNode + *out = make([]PerNodeAPIRequestLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerResourceAPIRequestLog. +func (in *PerResourceAPIRequestLog) DeepCopy() *PerResourceAPIRequestLog { + if in == nil { + return nil + } + out := new(PerResourceAPIRequestLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerUserAPIRequestCount) DeepCopyInto(out *PerUserAPIRequestCount) { + *out = *in + if in.ByVerb != nil { + in, out := &in.ByVerb, &out.ByVerb + *out = make([]PerVerbAPIRequestCount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerUserAPIRequestCount. +func (in *PerUserAPIRequestCount) DeepCopy() *PerUserAPIRequestCount { + if in == nil { + return nil + } + out := new(PerUserAPIRequestCount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerVerbAPIRequestCount) DeepCopyInto(out *PerVerbAPIRequestCount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerVerbAPIRequestCount. +func (in *PerVerbAPIRequestCount) DeepCopy() *PerVerbAPIRequestCount { + if in == nil { + return nil + } + out := new(PerVerbAPIRequestCount) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..f5ff911a2f57b --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,34 @@ +apirequestcounts.apiserver.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/897 + CRDName: apirequestcounts.apiserver.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: kube-apiserver + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: apiserver.openshift.io + HasStatus: true + KindName: APIRequestCount + Labels: {} + PluralName: apirequestcounts + PrinterColumns: + - description: Release in which an API will be removed. + jsonPath: .status.removedInRelease + name: RemovedInRelease + type: string + - description: Number of requests in the current hour. 
+    jsonPath: .status.currentHour.requestCount
+    name: RequestsInCurrentHour
+    type: integer
+  - description: Number of requests in the last 24h.
+    jsonPath: .status.requestCount
+    name: RequestsInLast24h
+    type: integer
+  Scope: Cluster
+  ShortNames: null
+  TopLevelFeatureGates: []
+  Version: v1
+
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000000..b3d6b615fc69f
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,97 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_APIRequestCount = map[string]string{
+	"":         "APIRequestCount tracks requests made to an API. The instance name must be of the form `resource.version.group`, matching the resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec defines the characteristics of the resource.",
+	"status":   "status contains the observed state of the resource.",
+}
+
+func (APIRequestCount) SwaggerDoc() map[string]string {
+	return map_APIRequestCount
+}
+
+var map_APIRequestCountList = map[string]string{
+	"":         "APIRequestCountList is a list of APIRequestCount resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (APIRequestCountList) SwaggerDoc() map[string]string {
+	return map_APIRequestCountList
+}
+
+var map_APIRequestCountSpec = map[string]string{
+	"numberOfUsersToReport": "numberOfUsersToReport is the number of users to include in the report. If unspecified or zero, the default is ten. This default is subject to change.",
+}
+
+func (APIRequestCountSpec) SwaggerDoc() map[string]string {
+	return map_APIRequestCountSpec
+}
+
+var map_APIRequestCountStatus = map[string]string{
+	"conditions":       "conditions contains details of the current status of this API Resource.",
+	"removedInRelease": "removedInRelease is when the API will be removed.",
+	"requestCount":     "requestCount is a sum of all requestCounts across all current hours, nodes, and users.",
+	"currentHour":      "currentHour contains request history for the current hour. This is porcelain to make the API easier for humans to read and to see whether they have addressed a problem. This field is reset on the hour.",
+	"last24h":          "last24h contains request history for the last 24 hours, indexed by the hour, so 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour is updated live and then duplicated into the currentHour field.",
+}
+
+func (APIRequestCountStatus) SwaggerDoc() map[string]string {
+	return map_APIRequestCountStatus
+}
+
+var map_PerNodeAPIRequestLog = map[string]string{
+	"":             "PerNodeAPIRequestLog contains logs of requests to a certain node.",
+	"nodeName":     "nodeName where the requests are being handled.",
+	"requestCount": "requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users.",
+	"byUser":       "byUser contains request details by top .spec.numberOfUsersToReport users. Note that because, in the case of an apiserver restart, the list of top users is determined on a best-effort basis, the list might be imprecise. In addition, some system users may be explicitly included in the list.",
+}
+
+func (PerNodeAPIRequestLog) SwaggerDoc() map[string]string {
+	return map_PerNodeAPIRequestLog
+}
+
+var map_PerResourceAPIRequestLog = map[string]string{
+	"":             "PerResourceAPIRequestLog logs requests for various nodes.",
+	"byNode":       "byNode contains logs of requests per node.",
+	"requestCount": "requestCount is a sum of all requestCounts across nodes.",
+}
+
+func (PerResourceAPIRequestLog) SwaggerDoc() map[string]string {
+	return map_PerResourceAPIRequestLog
+}
+
+var map_PerUserAPIRequestCount = map[string]string{
+	"":             "PerUserAPIRequestCount contains logs of a user's requests.",
+	"username":     "username that made the request.",
+	"userAgent":    "userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. In addition, we have userAgents with version information embedded and the userName isn't likely to change.",
+	"requestCount": "requestCount of requests by the user across all verbs.",
+	"byVerb":       "byVerb details by verb.",
+}
+
+func (PerUserAPIRequestCount) SwaggerDoc() map[string]string {
+	return map_PerUserAPIRequestCount
+}
+
+var map_PerVerbAPIRequestCount = map[string]string{
+	"":             "PerVerbAPIRequestCount counts requests by API request verb.",
+	"verb":         "verb of API request (get, list, create, etc...)",
+	"requestCount": "requestCount of requests for verb.",
+}
+
+func (PerVerbAPIRequestCount) SwaggerDoc() map[string]string {
+	return map_PerVerbAPIRequestCount
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/apps/v1/consts.go b/vendor/github.com/openshift/api/apps/v1/consts.go
new file mode 100644
index 0000000000000..212578bccfb1b
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/consts.go
@@ -0,0 +1,108 @@
+package v1
+
+const (
+	// DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state
+	// Used for specifying the reason for cancellation or failure of a deployment
+	// This is set on replication controller by deployer controller.
+	DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
+
+	// DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The
+	// annotation value is the name of the deployer Pod which will act upon the ReplicationController
+	// to implement the deployment behavior.
+	// This is set on replication controller by deployer controller.
+	DeploymentPodAnnotation = "openshift.io/deployer-pod.name"
+
+	// DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
+	// DeploymentConfig on which the deployment is based.
+	// This is set on replication controller pod template by deployer controller.
+	DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
+
+	// DeploymentCancelledAnnotation indicates that the deployment has been cancelled
+	// The annotation value does not matter and its mere presence indicates cancellation.
+	// This is set on replication controller by deployment config controller or oc rollout cancel command.
+	DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
+
+	// DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
+	// DeploymentConfig on which a given deployment is based.
+	// This is set on replication controller by deployer controller.
+	DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
+
+	// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
+	// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
+	// the deployment.
+	// This is set on replication controller pod template by deployment config controller.
+	DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
+
+	// DeployerPodForDeploymentLabel is a label which groups pods related to a
+	// deployment. The value is a deployment name. The deployer pod and hook pods
+	// created by the internal strategies will have this label. Custom
+	// strategies can apply this label to any pods they create, enabling
+	// platform-provided cancellation and garbage collection support.
+	// This is set on deployer pod by deployer controller.
+	DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name"
+
+	// DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of
+	// a deployment.
+	// This is set on replication controller by deployer controller.
+	DeploymentStatusAnnotation = "openshift.io/deployment.phase"
+)
+
+type DeploymentConditionReason string
+
+var (
+	// ReplicationControllerUpdatedReason is added in a deployment config when one of its replication
+	// controllers is updated as part of the rollout process.
+	ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated"
+
+	// ReplicationControllerCreateErrorReason is added in a deployment config when it cannot create a new replication
+	// controller.
+	ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError"
+
+	// NewReplicationControllerCreatedReason is added in a deployment config when it creates a new replication
+	// controller.
+	NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated"
+
+	// NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made
+	// available ie. the number of new pods that have passed readiness checks and run for at least
+	// minReadySeconds is at least the minimum available pods that need to run for the deployment config.
+	NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable"
+
+	// ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show
+	// any progress within the given deadline (progressDeadlineSeconds).
+	ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded"
+
+	// DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be
+	// estimated once a deployment config is paused.
+	DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused"
+
+	// DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not accidentally
+	// failing deployment configs that paused amidst a rollout.
+	DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed"
+
+	// RolloutCancelledReason is added in a deployment config when its newest rollout was
+	// interrupted by cancellation.
+	RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled"
+)
+
+// DeploymentStatus describes the possible states a deployment can be in.
+type DeploymentStatus string
+
+var (
+
+	// DeploymentStatusNew means the deployment has been accepted but not yet acted upon.
+	DeploymentStatusNew DeploymentStatus = "New"
+
+	// DeploymentStatusPending means the deployment has been handed over to a deployment strategy,
+	// but the strategy has not yet declared the deployment to be running.
+	DeploymentStatusPending DeploymentStatus = "Pending"
+
+	// DeploymentStatusRunning means the deployment strategy has reported the deployment as
+	// being in-progress.
+	DeploymentStatusRunning DeploymentStatus = "Running"
+
+	// DeploymentStatusComplete means the deployment finished without an error.
+	DeploymentStatusComplete DeploymentStatus = "Complete"
+
+	// DeploymentStatusFailed means the deployment finished with an error.
+	DeploymentStatusFailed DeploymentStatus = "Failed"
+)
diff --git a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
new file mode 100644
index 0000000000000..31969786c4d1b
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
@@ -0,0 +1,38 @@
+package v1
+
+// This file contains consts that are not shared between components and are set just internally.
+// They will likely be removed in the (near) future.
+
+const (
+	// DeployerPodCreatedAtAnnotation is an annotation on a deployment that
+	// records the time in RFC3339 format of when the deployer pod for this particular
+	// deployment was created.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at"
+
+	// DeployerPodStartedAtAnnotation is an annotation on a deployment that
+	// records the time in RFC3339 format of when the deployer pod for this particular
+	// deployment was started.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at"
+
+	// DeployerPodCompletedAtAnnotation is an annotation on a deployment that records
+	// the time in RFC3339 format of when the deployer pod finished.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at"
+
+	// DesiredReplicasAnnotation represents the desired number of replicas for a
+	// new deployment.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas"
+
+	// DeploymentAnnotation is an annotation on a deployer Pod.
The annotation value is the name + // of the deployment (a ReplicationController) on which the deployer Pod acts. + // This is set by deployer controller and consumed internally and in oc adm top command. + // DEPRECATED: will be removed soon + DeploymentAnnotation = "openshift.io/deployment.name" +) diff --git a/vendor/github.com/openshift/api/apps/v1/doc.go b/vendor/github.com/openshift/api/apps/v1/doc.go new file mode 100644 index 0000000000000..f0fb3f59a2c39 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/apps/apis/apps +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=apps.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go new file mode 100644 index 0000000000000..18ed8b9310a6f --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go @@ -0,0 +1,7461 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/apps/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} } +func (*CustomDeploymentStrategyParams) ProtoMessage() {} +func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{0} +} +func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src) +} +func (m *CustomDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo + +func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } +func (*DeploymentCause) ProtoMessage() {} +func (*DeploymentCause) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{1} +} +func (m *DeploymentCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCause.Merge(m, src) +} +func (m *DeploymentCause) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCause) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCause.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo + +func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} } +func (*DeploymentCauseImageTrigger) ProtoMessage() {} +func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{2} +} +func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src) +} +func (m *DeploymentCauseImageTrigger) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } +func (*DeploymentConfig) ProtoMessage() {} +func (*DeploymentConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{4} +} +func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfig.Merge(m, src) +} +func (m *DeploymentConfig) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo + +func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } +func (*DeploymentConfigList) ProtoMessage() {} +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{5} +} +func (m *DeploymentConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigList.Merge(m, src) +} +func (m *DeploymentConfigList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo + +func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } +func (*DeploymentConfigRollback) ProtoMessage() {} +func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{6} +} +func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollback.Merge(m, src) +} +func (m *DeploymentConfigRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo + +func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } +func (*DeploymentConfigRollbackSpec) ProtoMessage() {} +func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{7} +} +func (m *DeploymentConfigRollbackSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src) +} +func (m *DeploymentConfigRollbackSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo + +func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } +func (*DeploymentConfigSpec) ProtoMessage() {} +func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{8} +} +func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigSpec.Merge(m, src) +} +func (m *DeploymentConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo + +func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } +func (*DeploymentConfigStatus) ProtoMessage() {} +func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{9} +} +func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigStatus.Merge(m, src) +} +func (m *DeploymentConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo + +func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } +func (*DeploymentDetails) ProtoMessage() {} +func (*DeploymentDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{10} +} +func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentDetails.Merge(m, src) +} +func (m *DeploymentDetails) XXX_Size() int { + return m.Size() +} +func (m *DeploymentDetails) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo + +func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } +func (*DeploymentLog) ProtoMessage() {} +func (*DeploymentLog) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{11} +} +func (m *DeploymentLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentLog) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentLog.Merge(m, src) +} +func (m *DeploymentLog) XXX_Size() int { + return m.Size() +} +func (m *DeploymentLog) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentLog.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo + +func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } +func (*DeploymentLogOptions) ProtoMessage() {} +func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{12} +} +func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentLogOptions.Merge(m, src) +} +func (m *DeploymentLogOptions) XXX_Size() int { + return m.Size() +} +func (m *DeploymentLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo + +func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } +func (*DeploymentRequest) ProtoMessage() {} +func (*DeploymentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{13} +} +func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRequest.Merge(m, src) +} +func (m *DeploymentRequest) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{14} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} } +func (*DeploymentTriggerImageChangeParams) ProtoMessage() {} +func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{15} +} +func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src) +} +func (m *DeploymentTriggerImageChangeParams) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo + +func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} } +func (*DeploymentTriggerPolicies) ProtoMessage() {} +func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{16} +} +func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src) +} +func (m *DeploymentTriggerPolicies) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo + +func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} } +func (*DeploymentTriggerPolicy) ProtoMessage() {} +func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{17} +} +func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src) +} +func (m *DeploymentTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo + +func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } +func (*ExecNewPodHook) ProtoMessage() {} +func (*ExecNewPodHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{18} +} +func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecNewPodHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecNewPodHook.Merge(m, src) +} +func (m *ExecNewPodHook) XXX_Size() int { + return m.Size() +} +func (m *ExecNewPodHook) XXX_DiscardUnknown() { + xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo + +func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } +func 
(*LifecycleHook) ProtoMessage() {} +func (*LifecycleHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{19} +} +func (m *LifecycleHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LifecycleHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleHook.Merge(m, src) +} +func (m *LifecycleHook) XXX_Size() int { + return m.Size() +} +func (m *LifecycleHook) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleHook.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo + +func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} } +func (*RecreateDeploymentStrategyParams) ProtoMessage() {} +func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{20} +} +func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src) +} +func (m *RecreateDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo + +func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} } +func (*RollingDeploymentStrategyParams) ProtoMessage() {} +func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{21} +} +func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src) +} +func (m *RollingDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo + +func (m *TagImageHook) Reset() { *m = TagImageHook{} } +func (*TagImageHook) ProtoMessage() {} +func (*TagImageHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{22} +} +func (m *TagImageHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImageHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImageHook.Merge(m, src) +} +func (m *TagImageHook) XXX_Size() int { + return m.Size() +} +func (m *TagImageHook) 
XXX_DiscardUnknown() { + xxx_messageInfo_TagImageHook.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImageHook proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams") + proto.RegisterType((*DeploymentCause)(nil), "github.com.openshift.api.apps.v1.DeploymentCause") + proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "github.com.openshift.api.apps.v1.DeploymentCauseImageTrigger") + proto.RegisterType((*DeploymentCondition)(nil), "github.com.openshift.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig") + proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList") + proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry") + proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec") + proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry") + proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus") + proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails") + proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog") + proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions") + proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest") + proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry") + proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams") + proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies") + proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy") + proto.RegisterType((*ExecNewPodHook)(nil), "github.com.openshift.api.apps.v1.ExecNewPodHook") + proto.RegisterType((*LifecycleHook)(nil), "github.com.openshift.api.apps.v1.LifecycleHook") + proto.RegisterType((*RecreateDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RecreateDeploymentStrategyParams") + proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams") + proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1) +} + +var fileDescriptor_8f1b1bee37da74c1 = []byte{ + // 2523 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x77, 0x7b, 0x66, 0xec, 
0x99, 0xe7, 0xaf, 0xb8, 0x9c, 0x8f, 0x59, 0x2f, 0xf2, 0x58, 0xb3, + 0xda, 0xc5, 0xc0, 0x32, 0xb3, 0xf1, 0x86, 0xd5, 0x26, 0xd1, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1, + 0x38, 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd, + 0x35, 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20, + 0x0e, 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3, + 0x4f, 0xc8, 0x09, 0xd5, 0x47, 0x7f, 0xcd, 0x47, 0xec, 0x71, 0x72, 0x73, 0xbf, 0x8f, 0xdf, 0x7b, + 0xf5, 0xea, 0xbd, 0x57, 0xaf, 0x6a, 0x0c, 0xef, 0x34, 0x99, 0x38, 0x68, 0xef, 0x55, 0x6c, 0xee, + 0x56, 0x79, 0x8b, 0x7a, 0xc1, 0x01, 0xdb, 0x17, 0x55, 0xd2, 0x62, 0x55, 0xd2, 0x6a, 0x05, 0xd5, + 0xce, 0xe5, 0x6a, 0x93, 0x7a, 0xd4, 0x27, 0x82, 0x36, 0x2a, 0x2d, 0x9f, 0x0b, 0x8e, 0x96, 0x63, + 0x8d, 0x4a, 0xa4, 0x51, 0x21, 0x2d, 0x56, 0x91, 0x1a, 0x95, 0xce, 0xe5, 0xc5, 0x6f, 0x26, 0x30, + 0x9b, 0xbc, 0xc9, 0xab, 0x4a, 0x71, 0xaf, 0xbd, 0xaf, 0xbe, 0xd4, 0x87, 0xfa, 0x4b, 0x03, 0x2e, + 0x96, 0x0f, 0xdf, 0x0f, 0x2a, 0x8c, 0x2b, 0xa3, 0x36, 0xf7, 0xe9, 0x00, 0xa3, 0x8b, 0x57, 0x62, + 0x19, 0x97, 0xd8, 0x07, 0xcc, 0xa3, 0x7e, 0xb7, 0xda, 0x3a, 0x6c, 0x4a, 0x42, 0x50, 0x75, 0xa9, + 0x20, 0x83, 0xb4, 0xde, 0x1b, 0xa6, 0xe5, 0xb7, 0x3d, 0xc1, 0x5c, 0x5a, 0x0d, 0xec, 0x03, 0xea, + 0x92, 0x3e, 0xbd, 0x77, 0x87, 0xe9, 0xb5, 0x05, 0x73, 0xaa, 0xcc, 0x13, 0x81, 0xf0, 0x7b, 0x95, + 0xca, 0x7f, 0xb6, 0x60, 0x69, 0xbd, 0x1d, 0x08, 0xee, 0xde, 0xa0, 0x2d, 0x87, 0x77, 0x5d, 0xea, + 0x89, 0x1d, 0x21, 0x25, 0x9a, 0xdd, 0x6d, 0xe2, 0x13, 0x37, 0x40, 0x6f, 0x40, 0x8e, 0xb9, 0xa4, + 0x49, 0x8b, 0xd6, 0xb2, 0xb5, 0x52, 0xa8, 0xcd, 0x3c, 0x3d, 0x2a, 0x8d, 0x1d, 0x1f, 0x95, 0x72, + 0x9b, 0x92, 0x88, 0x35, 0x0f, 0x7d, 0x17, 0xa6, 0xa8, 0xd7, 0x61, 0x3e, 0xf7, 0x24, 0x42, 0x71, + 0x7c, 0x39, 0xb3, 0x32, 0xb5, 0xba, 0x58, 0xd1, 0x2e, 0xa9, 0x38, 0xcb, 0x20, 0x55, 0x3a, 0x97, + 0x2b, 0x1b, 0x5e, 0xe7, 0x1e, 0xf1, 0x6b, 0x0b, 0x06, 0x66, 0x6a, 0x23, 0x56, 0xc3, 0x49, 0x0c, + 0xf4, 0x26, 0x4c, 0xda, 0xdc, 0x75, 0x89, 0xd7, 0x28, 0x66, 0x96, 0x33, 0x2b, 0x85, 0xda, 0xd4, + 0xf1, 0x51, 0x69, 0x72, 0x5d, 0x93, 0x70, 0xc8, 0x2b, 0xff, 0xc5, 0x82, 0xb9, 0xd8, 0xf7, 0x75, + 0xd2, 0x0e, 0x28, 0xba, 0x0a, 0x59, 0xd1, 0x6d, 0x85, 0x1e, 0xbf, 0x69, 0x4c, 0x65, 0x77, 0xbb, + 0x2d, 0xfa, 0xfc, 0xa8, 0x74, 0x21, 0x16, 0xdf, 0xf5, 0x59, 0xb3, 0x49, 0x7d, 0xc9, 0xc0, 0x4a, + 0x05, 0x05, 0x30, 0xad, 0x56, 0x64, 0x38, 0xc5, 0xf1, 0x65, 0x6b, 0x65, 0x6a, 0xf5, 0x83, 0xca, + 0x49, 0xf9, 0x53, 0xe9, 0xf1, 0x61, 0x33, 0x01, 0x52, 0x3b, 0x77, 0x7c, 0x54, 0x9a, 0x4e, 0x52, + 0x70, 0xca, 0x48, 0xb9, 0x01, 0xaf, 0xbf, 0x40, 0x1d, 0x6d, 0x40, 0x76, 0xdf, 0xe7, 0xae, 0x5a, + 0xce, 0xd4, 0xea, 0x1b, 0x83, 0xa2, 0x7a, 0x67, 0xef, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, 0xa9, 0x4f, + 0x3d, 0x9b, 0xd6, 0xa6, 0xc3, 0x35, 0xdf, 0xf4, 0xb9, 0x8b, 0x95, 0x7a, 0xf9, 0x5f, 0x19, 0x58, + 0x48, 0x98, 0xe1, 0x5e, 0x83, 0x09, 0xc6, 0x3d, 0x74, 0x3d, 0x15, 0xad, 0xaf, 0xf6, 0x44, 0xeb, + 0xd2, 0x00, 0x95, 0x44, 0xbc, 0xea, 0x30, 0x11, 0x08, 0x22, 0xda, 0x81, 0x8a, 0x54, 0xa1, 0x76, + 0xc5, 0xa8, 0x4f, 0xec, 0x28, 0xea, 0xf3, 0xa3, 0xd2, 0x80, 0x4a, 0xa9, 0x44, 0x48, 0x5a, 0x0a, + 0x1b, 0x0c, 0xf4, 0x09, 0xcc, 0x3a, 0x24, 0x10, 0x77, 0x5b, 0x0d, 0x22, 0xe8, 0x2e, 0x73, 0x69, + 0x71, 0x42, 0xad, 0xf9, 0xeb, 0x89, 0x35, 0x47, 0xc9, 0x5d, 0x69, 0x1d, 0x36, 0x25, 0x21, 0xa8, + 0xc8, 0x52, 0x92, 0x51, 0x90, 0x1a, 0xb5, 0x8b, 0xc6, 0x83, 0xd9, 0x7a, 0x0a, 0x09, 0xf7, 0x20, + 0xa3, 0x0e, 0x20, 0x49, 0xd9, 0xf5, 0x89, 0x17, 0xe8, 
0x55, 0x49, 0x7b, 0x99, 0x91, 0xed, 0x2d, + 0x1a, 0x7b, 0xa8, 0xde, 0x87, 0x86, 0x07, 0x58, 0x40, 0x6f, 0xc1, 0x84, 0x4f, 0x49, 0xc0, 0xbd, + 0x62, 0x56, 0x45, 0x6c, 0x36, 0x8c, 0x18, 0x56, 0x54, 0x6c, 0xb8, 0xe8, 0x6b, 0x30, 0xe9, 0xd2, + 0x20, 0x90, 0x95, 0x97, 0x53, 0x82, 0x73, 0x46, 0x70, 0x72, 0x4b, 0x93, 0x71, 0xc8, 0x2f, 0xff, + 0x71, 0x1c, 0xce, 0xa5, 0xb6, 0x69, 0x9f, 0x35, 0xd1, 0x23, 0xc8, 0x4b, 0x3f, 0x1b, 0x44, 0x10, + 0x93, 0x39, 0xef, 0x9c, 0x6e, 0x55, 0x3a, 0x97, 0xb6, 0xa8, 0x20, 0x35, 0x64, 0x4c, 0x42, 0x4c, + 0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x16, 0xb5, 0x4d, 0x8d, 0xbc, 0x37, 0x52, 0x8d, 0x28, + 0x1f, 0x77, 0x5a, 0xd4, 0x8e, 0x53, 0x55, 0x7e, 0x61, 0x85, 0x88, 0x1e, 0x45, 0x59, 0xa5, 0xf7, + 0xe3, 0xfd, 0x33, 0x60, 0x2b, 0xfd, 0x38, 0xba, 0xe9, 0x4c, 0x2b, 0xff, 0xdd, 0x82, 0xf3, 0xbd, + 0x2a, 0x75, 0x16, 0x08, 0xf4, 0xfd, 0xbe, 0xb0, 0x55, 0x4e, 0x17, 0x36, 0xa9, 0xad, 0x82, 0x76, + 0xce, 0x98, 0xcc, 0x87, 0x94, 0x44, 0xc8, 0xee, 0x43, 0x8e, 0x09, 0xea, 0x06, 0xa6, 0x43, 0xae, + 0x8e, 0xbe, 0xae, 0x44, 0x03, 0x96, 0x40, 0x58, 0xe3, 0x95, 0x7f, 0x9e, 0x81, 0x62, 0xaf, 0x28, + 0xe6, 0x8e, 0xb3, 0x47, 0xec, 0x43, 0xb4, 0x0c, 0x59, 0x8f, 0xb8, 0x61, 0x85, 0x47, 0x01, 0xbf, + 0x4d, 0x5c, 0x8a, 0x15, 0x07, 0xfd, 0xc6, 0x02, 0xd4, 0x56, 0xb5, 0xd1, 0x58, 0xf3, 0x3c, 0x2e, + 0x88, 0x4c, 0xd7, 0xd0, 0x4b, 0x3c, 0xba, 0x97, 0xa1, 0xe9, 0xca, 0xdd, 0x3e, 0xd0, 0x0d, 0x4f, + 0xf8, 0xdd, 0xb8, 0x6a, 0xfa, 0x05, 0xf0, 0x00, 0x4f, 0xd0, 0x23, 0x93, 0x6b, 0x3a, 0x1f, 0x3e, + 0x3c, 0xbb, 0x47, 0xc3, 0x72, 0x6e, 0x71, 0x03, 0x2e, 0x0d, 0x71, 0x16, 0x9d, 0x83, 0xcc, 0x21, + 0xed, 0xea, 0xf0, 0x61, 0xf9, 0x27, 0x3a, 0x0f, 0xb9, 0x0e, 0x71, 0xda, 0x54, 0x77, 0x3d, 0xac, + 0x3f, 0xae, 0x8d, 0xbf, 0x6f, 0x95, 0xff, 0x94, 0x81, 0xaf, 0xbc, 0xc8, 0xf6, 0x2b, 0xea, 0xe6, + 0xe8, 0x6d, 0xc8, 0xfb, 0xb4, 0xc3, 0x02, 0xc6, 0x3d, 0xe5, 0x44, 0x26, 0xce, 0x3b, 0x6c, 0xe8, + 0x38, 0x92, 0x40, 0x6b, 0x30, 0xc7, 0x3c, 0xdb, 0x69, 0x37, 0xc2, 0x43, 0x45, 0x57, 0x56, 0xbe, + 0x76, 0xc9, 0x28, 0xcd, 0x6d, 0xa6, 0xd9, 0xb8, 0x57, 0x3e, 0x09, 0x41, 0xdd, 0x96, 0x43, 0x04, + 0x55, 0x0d, 0x6c, 0x00, 0x84, 0x61, 0xe3, 0x5e, 0x79, 0x74, 0x0f, 0x2e, 0x1a, 0x12, 0xa6, 0x2d, + 0x87, 0xd9, 0x2a, 0xc6, 0xb2, 0x42, 0x54, 0x87, 0xcb, 0xd7, 0x96, 0x0c, 0xd2, 0xc5, 0xcd, 0x81, + 0x52, 0x78, 0x88, 0x76, 0xc2, 0xb5, 0x70, 0x76, 0x51, 0xe7, 0x46, 0xbf, 0x6b, 0x21, 0x1b, 0xf7, + 0xca, 0x97, 0xff, 0x97, 0xeb, 0xef, 0x07, 0x6a, 0xbb, 0xf6, 0x20, 0x1f, 0x84, 0xa0, 0x7a, 0xcb, + 0xae, 0x8c, 0x92, 0x7c, 0xa1, 0x81, 0x78, 0x77, 0x22, 0x1f, 0x22, 0x5c, 0xe9, 0xbf, 0xcb, 0x3c, + 0x4c, 0x49, 0xa3, 0xbb, 0x43, 0x6d, 0xee, 0x35, 0x82, 0x62, 0x61, 0xd9, 0x5a, 0xc9, 0xc5, 0xfe, + 0x6f, 0xa5, 0xd9, 0xb8, 0x57, 0x1e, 0x51, 0xc8, 0x8b, 0x70, 0x67, 0x75, 0x3f, 0xbe, 0x3e, 0x8a, + 0x9b, 0x66, 0x97, 0xb7, 0xb9, 0xc3, 0x6c, 0x46, 0x83, 0xda, 0xb4, 0xf4, 0x34, 0xca, 0x85, 0x08, + 0x5a, 0x67, 0x9d, 0x0a, 0xbe, 0x4e, 0xa0, 0x5c, 0x32, 0xeb, 0x34, 0x1d, 0x47, 0x12, 0xa8, 0x0e, + 0xe7, 0xc3, 0x0c, 0xfc, 0x98, 0x05, 0x82, 0xfb, 0xdd, 0x3a, 0x73, 0x99, 0x50, 0x79, 0x93, 0xab, + 0x15, 0x8f, 0x8f, 0x4a, 0xe7, 0xf1, 0x00, 0x3e, 0x1e, 0xa8, 0x25, 0xbb, 0x98, 0xa0, 0x81, 0x30, + 0xb9, 0x12, 0xd5, 0xc4, 0x2e, 0x0d, 0x04, 0x56, 0x1c, 0x79, 0xb4, 0xb6, 0xe4, 0xf4, 0xd4, 0x30, + 0xdb, 0x1f, 0x35, 0xff, 0x6d, 0x45, 0xc5, 0x86, 0x8b, 0x7c, 0xc8, 0x07, 0xd4, 0xa1, 0xb6, 0xe0, + 0x7e, 0x71, 0x52, 0xb5, 0xb8, 0x1b, 0x67, 0x3b, 0xbc, 0x2a, 0x3b, 0x06, 0x46, 0x37, 0xb5, 0x78, + 0x8f, 0x0d, 0x19, 0x47, 0x76, 0xd0, 0x16, 0xe4, 0x45, 0x58, 0x37, 0xf9, 0xe1, 
0xa5, 0xbf, 0xcd, + 0x1b, 0x61, 0xb9, 0xe8, 0x4e, 0xa5, 0x36, 0x22, 0xac, 0xa8, 0x08, 0x62, 0xf1, 0x3a, 0xcc, 0xa4, + 0x6c, 0x8f, 0xd4, 0xa3, 0xfe, 0x90, 0x83, 0x8b, 0x83, 0xcf, 0x4b, 0x74, 0x1d, 0x66, 0x24, 0x7e, + 0x20, 0xee, 0x51, 0x5f, 0xf5, 0x16, 0x4b, 0xf5, 0x96, 0x0b, 0x66, 0x65, 0x33, 0xf5, 0x24, 0x13, + 0xa7, 0x65, 0xd1, 0x2d, 0x40, 0x7c, 0x2f, 0xa0, 0x7e, 0x87, 0x36, 0x3e, 0xd2, 0x17, 0x8d, 0xb8, + 0x3b, 0x45, 0x0d, 0xff, 0x4e, 0x9f, 0x04, 0x1e, 0xa0, 0x35, 0x62, 0xa6, 0xad, 0xc1, 0x9c, 0x39, + 0x34, 0x42, 0xa6, 0x49, 0xb2, 0xa8, 0x82, 0xee, 0xa6, 0xd9, 0xb8, 0x57, 0x1e, 0x7d, 0x04, 0xf3, + 0xa4, 0x43, 0x98, 0x43, 0xf6, 0x1c, 0x1a, 0x81, 0xe4, 0x14, 0xc8, 0x6b, 0x06, 0x64, 0x7e, 0xad, + 0x57, 0x00, 0xf7, 0xeb, 0xa0, 0x2d, 0x58, 0x68, 0x7b, 0xfd, 0x50, 0x13, 0x0a, 0xea, 0x75, 0x03, + 0xb5, 0x70, 0xb7, 0x5f, 0x04, 0x0f, 0xd2, 0x43, 0x0f, 0x61, 0xb2, 0x41, 0x05, 0x61, 0x4e, 0x50, + 0x9c, 0x54, 0x79, 0xf3, 0xee, 0x28, 0xb9, 0x7a, 0x43, 0xab, 0xea, 0xcb, 0x93, 0xf9, 0xc0, 0x21, + 0x20, 0x62, 0x00, 0x76, 0x38, 0x8a, 0x07, 0xc5, 0xbc, 0x2a, 0x85, 0x6f, 0x8d, 0x58, 0x0a, 0x5a, + 0x3b, 0x1e, 0x15, 0x23, 0x52, 0x80, 0x13, 0xe0, 0x32, 0xb1, 0x7c, 0xd9, 0xb0, 0xa2, 0x78, 0xe8, + 0x0e, 0x17, 0x25, 0x16, 0x4e, 0x32, 0x71, 0x5a, 0xb6, 0xfc, 0x6b, 0x0b, 0xe6, 0xfb, 0xd6, 0x94, + 0x9c, 0x90, 0xad, 0x17, 0x4f, 0xc8, 0xe8, 0x01, 0x4c, 0xd8, 0xb2, 0xf6, 0xc3, 0x91, 0xe6, 0xf2, + 0xc8, 0x17, 0xba, 0xb8, 0x99, 0xa8, 0xcf, 0x00, 0x1b, 0xc0, 0xf2, 0x1c, 0xcc, 0xc4, 0xa2, 0x75, + 0xde, 0x2c, 0x7f, 0x96, 0x4d, 0x1e, 0x25, 0x75, 0xde, 0xbc, 0xd3, 0xd2, 0x21, 0xa8, 0x42, 0xc1, + 0xe6, 0x9e, 0x20, 0x72, 0x80, 0x34, 0x1e, 0xcf, 0x1b, 0xd0, 0xc2, 0x7a, 0xc8, 0xc0, 0xb1, 0x8c, + 0xec, 0x67, 0xfb, 0xdc, 0x71, 0xf8, 0x63, 0x55, 0x43, 0x89, 0x7e, 0x76, 0x53, 0x51, 0xb1, 0xe1, + 0xca, 0x5a, 0x69, 0xc9, 0x96, 0xc9, 0xdb, 0xe1, 0xb1, 0x1e, 0xd5, 0xca, 0xb6, 0xa1, 0xe3, 0x48, + 0x02, 0x5d, 0x81, 0xe9, 0x80, 0x79, 0x36, 0x0d, 0x8f, 0x9a, 0xac, 0x9e, 0x1e, 0xe4, 0x1d, 0x75, + 0x27, 0x41, 0xc7, 0x29, 0x29, 0x74, 0x1f, 0x0a, 0xea, 0x5b, 0xdd, 0x92, 0x72, 0x23, 0xdf, 0x92, + 0x66, 0xe4, 0x22, 0x77, 0x42, 0x00, 0x1c, 0x63, 0xa1, 0x55, 0x00, 0xc1, 0x5c, 0x1a, 0x08, 0xe2, + 0xb6, 0x02, 0xd3, 0xb8, 0xa3, 0x64, 0xda, 0x8d, 0x38, 0x38, 0x21, 0x85, 0xbe, 0x01, 0x05, 0x99, + 0x02, 0x75, 0xe6, 0x51, 0x5d, 0x15, 0x19, 0x6d, 0x60, 0x37, 0x24, 0xe2, 0x98, 0x8f, 0x2a, 0x00, + 0x8e, 0x3c, 0x40, 0x6a, 0x5d, 0x41, 0x03, 0xd5, 0x7b, 0x33, 0xb5, 0x59, 0x09, 0x5e, 0x8f, 0xa8, + 0x38, 0x21, 0x21, 0xa3, 0xee, 0xf1, 0xc7, 0x84, 0x09, 0x95, 0xa2, 0x89, 0xa8, 0xdf, 0xe6, 0xf7, + 0x09, 0x13, 0xd8, 0x70, 0xd1, 0x9b, 0x30, 0xd9, 0x31, 0x4d, 0x12, 0x14, 0xa8, 0xaa, 0xb1, 0xb0, + 0x35, 0x86, 0xbc, 0xf2, 0xbf, 0x53, 0xb9, 0x8b, 0xe9, 0x8f, 0xda, 0xf2, 0xa8, 0x3a, 0x79, 0x24, + 0x7f, 0x0b, 0x26, 0x74, 0x77, 0xed, 0xdd, 0x7c, 0xdd, 0x82, 0xb1, 0xe1, 0xa2, 0x37, 0x20, 0xb7, + 0xcf, 0x7d, 0x9b, 0x9a, 0x9d, 0x8f, 0xae, 0x07, 0x37, 0x25, 0x11, 0x6b, 0x1e, 0xba, 0x07, 0x73, + 0xf4, 0x49, 0x7a, 0xfe, 0xcb, 0xaa, 0x47, 0x95, 0xb7, 0x65, 0x6f, 0xdc, 0x48, 0xb3, 0x86, 0xbf, + 0x91, 0xf4, 0x82, 0x94, 0xff, 0x31, 0x09, 0xa8, 0x7f, 0xd8, 0x41, 0xd7, 0x52, 0x4f, 0x0a, 0x6f, + 0xf5, 0x3c, 0x29, 0x5c, 0xec, 0xd7, 0x48, 0xbc, 0x28, 0x74, 0x60, 0xda, 0x56, 0x2f, 0x52, 0xfa, + 0xfd, 0xc9, 0x4c, 0x33, 0xdf, 0x39, 0xb9, 0x60, 0x5f, 0xfc, 0x8e, 0xa5, 0x13, 0x7c, 0x3d, 0x81, + 0x8c, 0x53, 0x76, 0xd0, 0x4f, 0x61, 0xd6, 0xa7, 0xb6, 0x4f, 0x89, 0xa0, 0xc6, 0xb2, 0xbe, 0x6b, + 0xd4, 0x4e, 0xb6, 0x8c, 0x8d, 0xde, 0x50, 0xdb, 0xe8, 0xf8, 0xa8, 0x34, 0x8b, 0x53, 0xe8, 0xb8, + 0xc7, 
0x1a, 0xfa, 0x31, 0xcc, 0xf8, 0xdc, 0x71, 0x98, 0xd7, 0x34, 0xe6, 0xb3, 0xca, 0xfc, 0xda, + 0x29, 0xcc, 0x6b, 0xb5, 0xa1, 0xd6, 0xe7, 0x55, 0x7f, 0x4d, 0x62, 0xe3, 0xb4, 0x29, 0xf4, 0x00, + 0x0a, 0x3e, 0x0d, 0x78, 0xdb, 0xb7, 0x69, 0x60, 0x8a, 0x7b, 0x65, 0xd0, 0x74, 0x82, 0x8d, 0x90, + 0xcc, 0x62, 0xe6, 0x53, 0x69, 0x2b, 0x88, 0x7b, 0x58, 0xc8, 0x0d, 0x70, 0x8c, 0x86, 0x0e, 0x64, + 0x1a, 0xef, 0x51, 0x47, 0x96, 0x76, 0xe6, 0x74, 0x1b, 0xd9, 0xbf, 0x90, 0x4a, 0x5d, 0x41, 0xe8, + 0x29, 0x2b, 0x51, 0x08, 0x92, 0x88, 0x0d, 0x3e, 0xfa, 0x09, 0x4c, 0x91, 0xc4, 0xdd, 0x55, 0x0f, + 0x76, 0x1b, 0x67, 0x32, 0xd7, 0x77, 0x5d, 0x8d, 0x9e, 0x2b, 0x93, 0xf7, 0xd4, 0xa4, 0x39, 0x74, + 0x07, 0x2e, 0x10, 0x5b, 0xb0, 0x0e, 0xbd, 0x41, 0x49, 0xc3, 0x61, 0x5e, 0xd4, 0x5e, 0x75, 0xc3, + 0x79, 0xed, 0xf8, 0xa8, 0x74, 0x61, 0x6d, 0x90, 0x00, 0x1e, 0xac, 0xb7, 0x78, 0x15, 0xa6, 0x12, + 0xab, 0x1e, 0x65, 0xbe, 0x5b, 0xfc, 0x10, 0xce, 0xbd, 0xd4, 0x1d, 0xf6, 0x77, 0xe3, 0x50, 0xee, + 0x6b, 0x00, 0xea, 0x49, 0x72, 0xfd, 0x80, 0x78, 0xcd, 0x30, 0x63, 0xab, 0x50, 0x20, 0x6d, 0xc1, + 0x5d, 0x22, 0x98, 0xad, 0x80, 0xf3, 0x71, 0x2e, 0xac, 0x85, 0x0c, 0x1c, 0xcb, 0xa0, 0x6b, 0x30, + 0x1b, 0x1d, 0x6e, 0xb2, 0xd3, 0xe9, 0xd3, 0xb8, 0xa0, 0xcb, 0x63, 0x3d, 0xc5, 0xc1, 0x3d, 0x92, + 0xd1, 0xb5, 0x39, 0xf3, 0x72, 0xd7, 0xe6, 0x5b, 0xe1, 0xab, 0x9f, 0x5a, 0x13, 0x6d, 0xa8, 0x55, + 0x99, 0x97, 0xb8, 0x9e, 0x97, 0xbc, 0xa4, 0x04, 0x1e, 0xa0, 0x55, 0xfe, 0x99, 0x05, 0xaf, 0x0d, + 0xbd, 0x42, 0xa1, 0x1f, 0x84, 0x4f, 0x3d, 0x96, 0x4a, 0xc4, 0xab, 0x67, 0xbd, 0x8e, 0x75, 0x07, + 0xbf, 0xf8, 0x5c, 0xcb, 0xff, 0xea, 0xb7, 0xa5, 0xb1, 0x4f, 0xff, 0xb3, 0x3c, 0x56, 0xfe, 0xd2, + 0x82, 0x4b, 0x43, 0x74, 0x5f, 0xe6, 0x29, 0xfc, 0x17, 0x16, 0xcc, 0xb3, 0xde, 0x4d, 0x37, 0xed, + 0xf8, 0xc6, 0x19, 0x56, 0xd3, 0x97, 0x40, 0xb5, 0x0b, 0x72, 0xa6, 0xee, 0x23, 0xe3, 0x7e, 0xab, + 0xe5, 0x7f, 0x5a, 0x30, 0xbb, 0xf1, 0x84, 0xda, 0xb7, 0xe9, 0xe3, 0x6d, 0xde, 0xf8, 0x98, 0xf3, + 0xc3, 0xe4, 0xef, 0x03, 0xd6, 0xf0, 0xdf, 0x07, 0xd0, 0x55, 0xc8, 0x50, 0xaf, 0x73, 0x8a, 0x5f, + 0x24, 0xa6, 0x4c, 0x6c, 0x32, 0x1b, 0x5e, 0x07, 0x4b, 0x1d, 0x39, 0xb2, 0xa6, 0x92, 0x50, 0xe5, + 0x5e, 0x21, 0x1e, 0x59, 0x53, 0x19, 0x8b, 0xd3, 0xb2, 0x6a, 0x3a, 0xe0, 0x4e, 0x5b, 0x26, 0x79, + 0x36, 0x76, 0xef, 0x9e, 0x26, 0xe1, 0x90, 0x57, 0xfe, 0xfd, 0x38, 0xcc, 0xd4, 0xd9, 0x3e, 0xb5, + 0xbb, 0xb6, 0x43, 0xd5, 0xba, 0x1e, 0xc0, 0xcc, 0x3e, 0x61, 0x4e, 0xdb, 0xa7, 0x7a, 0x0b, 0xcd, + 0xd6, 0xbd, 0x1b, 0x5a, 0xbd, 0x99, 0x64, 0x3e, 0x3f, 0x2a, 0x2d, 0xa6, 0xd4, 0x53, 0x5c, 0x9c, + 0x46, 0x42, 0x8f, 0x00, 0x68, 0x14, 0x44, 0xb3, 0x93, 0xef, 0x9c, 0xbc, 0x93, 0xe9, 0xc0, 0xeb, + 0xd9, 0x29, 0xa6, 0xe1, 0x04, 0x26, 0xfa, 0xa1, 0x1c, 0xcc, 0x9a, 0x6a, 0x4b, 0x03, 0xf5, 0xb3, + 0xcd, 0xd4, 0x6a, 0xe5, 0x64, 0x03, 0xbb, 0x46, 0x45, 0xc1, 0x47, 0x2d, 0x24, 0xa4, 0xaa, 0x61, + 0xce, 0xfc, 0x59, 0xfe, 0xeb, 0x38, 0x2c, 0x9f, 0x74, 0xdc, 0xca, 0x3e, 0x23, 0x87, 0x45, 0xde, + 0x16, 0x61, 0x13, 0xd6, 0xb7, 0x58, 0xd5, 0x67, 0x76, 0x53, 0x1c, 0xdc, 0x23, 0x89, 0x6e, 0x41, + 0xa6, 0xe5, 0x53, 0x13, 0x9c, 0xea, 0xc9, 0xbe, 0xa7, 0xa2, 0x5f, 0x9b, 0x94, 0x09, 0xb4, 0xed, + 0x53, 0x2c, 0x41, 0x24, 0x96, 0xcb, 0x1a, 0xa6, 0x65, 0x9d, 0x0d, 0x6b, 0x8b, 0x35, 0xb0, 0x04, + 0x41, 0x5b, 0x90, 0x6d, 0xf1, 0x40, 0x98, 0xa9, 0x60, 0x64, 0xb0, 0xbc, 0xac, 0xfa, 0x6d, 0x1e, + 0x08, 0xac, 0x60, 0xca, 0x7f, 0xcb, 0x42, 0xe9, 0x84, 0xb9, 0x01, 0x6d, 0xc2, 0x82, 0xbe, 0x24, + 0x6f, 0x53, 0x9f, 0xf1, 0x46, 0x3a, 0x96, 0x97, 0xd4, 0x25, 0xb6, 0x9f, 0x8d, 0x07, 0xe9, 0xa0, + 0x0f, 0x60, 0x8e, 0x79, 0x82, 
0xfa, 0x1d, 0xe2, 0x84, 0x30, 0xfa, 0x59, 0x60, 0x41, 0xbf, 0xce, + 0xa5, 0x58, 0xb8, 0x57, 0x76, 0xc0, 0x86, 0x66, 0x4e, 0xbd, 0xa1, 0x0e, 0xcc, 0xba, 0xe4, 0x49, + 0xe2, 0xba, 0x6d, 0x42, 0x38, 0xfc, 0xd7, 0x90, 0xb6, 0x60, 0x4e, 0x45, 0xff, 0x60, 0x5a, 0xd9, + 0xf4, 0xc4, 0x1d, 0x7f, 0x47, 0xf8, 0xcc, 0x6b, 0x6a, 0x6b, 0x5b, 0x29, 0x2c, 0xdc, 0x83, 0x8d, + 0x1e, 0x42, 0xde, 0x25, 0x4f, 0x76, 0xda, 0x7e, 0x33, 0xbc, 0x25, 0x8d, 0x6e, 0x47, 0xbd, 0xf9, + 0x6c, 0x19, 0x14, 0x1c, 0xe1, 0x85, 0xa9, 0x39, 0xf9, 0x2a, 0x52, 0x33, 0x4c, 0xa7, 0xfc, 0xab, + 0x49, 0xa7, 0xcf, 0x2c, 0x98, 0x4e, 0x56, 0x71, 0x7f, 0xef, 0xb4, 0x46, 0xe8, 0x9d, 0xdf, 0x86, + 0x71, 0xc1, 0x4d, 0x09, 0x9e, 0xea, 0xa4, 0x07, 0x03, 0x3b, 0xbe, 0xcb, 0xf1, 0xb8, 0xe0, 0xb5, + 0x9b, 0x4f, 0x9f, 0x2d, 0x8d, 0x7d, 0xfe, 0x6c, 0x69, 0xec, 0x8b, 0x67, 0x4b, 0x63, 0x9f, 0x1e, + 0x2f, 0x59, 0x4f, 0x8f, 0x97, 0xac, 0xcf, 0x8f, 0x97, 0xac, 0x2f, 0x8e, 0x97, 0xac, 0x2f, 0x8f, + 0x97, 0xac, 0x5f, 0xfe, 0x77, 0x69, 0xec, 0xe1, 0xf2, 0x49, 0xff, 0x46, 0xf0, 0xff, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x5e, 0x3a, 0xd7, 0x70, 0x69, 0x20, 0x00, 0x00, +} + +func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Environment) > 0 { + for iNdEx := len(m.Environment) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Environment[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ImageTrigger != nil { + { + size, err := m.ImageTrigger.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCauseImageTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigRollbackSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.IncludeStrategy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i-- + if m.IncludeReplicationMeta { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.IncludeTemplate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.IncludeTriggers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x10 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x48 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i-- + if m.Test { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x20 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x18 + if m.Triggers != nil { + { + size, err := m.Triggers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x48 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + 
dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Causes) > 0 { + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + i-- + dAtA[i] = 0x50 + } + i-- + if m.NoWait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 + } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 + } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 + } + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExcludeTriggers) > 0 { + for iNdEx := len(m.ExcludeTriggers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTriggers[iNdEx]) + copy(dAtA[i:], m.ExcludeTriggers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExcludeTriggers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Latest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x40 + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.RollingParams != nil { + { + size, err := m.RollingParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RecreateParams != nil { + { + size, err := m.RecreateParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CustomParams != nil { + { + size, err := m.CustomParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentTriggerImageChangeParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.LastTriggeredImage) + copy(dAtA[i:], m.LastTriggeredImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) + i-- + dAtA[i] = 0x22 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ContainerNames) > 0 { + for iNdEx := len(m.ContainerNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ContainerNames[iNdEx]) + copy(dAtA[i:], m.ContainerNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Automatic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m DeploymentTriggerPolicies) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ImageChangeParams != nil { + { + size, err := m.ImageChangeParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecNewPodHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x1a + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TagImages) > 0 { + for iNdEx := len(m.TagImages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TagImages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ExecNewPod != nil { + { + size, err := m.ExecNewPod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.FailurePolicy) + copy(dAtA[i:], m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RecreateDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Post != nil { + { + size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Mid != nil { + { + size, err := m.Mid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Pre != nil { + { + size, err := 
m.Pre.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Post != nil { + { + size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Pre != nil { + { + size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.IntervalSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.UpdatePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TagImageHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagImageHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CustomDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Environment) > 0 { + for _, e := range m.Environment { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Command) > 0 { + for _, s := range 
+func (m *CustomDeploymentStrategyParams) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Image)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Environment) > 0 {
+		for _, e := range m.Environment {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentCause) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ImageTrigger != nil {
+		l = m.ImageTrigger.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeploymentCauseImageTrigger) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastUpdateTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentConfigList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentConfigRollback) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.UpdatedAnnotations) > 0 {
+		for k, v := range m.UpdatedAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentConfigRollbackSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Revision))
+	n += 2
+	n += 2
+	n += 2
+	n += 2
+	return n
+}
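The Size methods do the bookkeeping that MarshalToSizedBuffer relies on: for field numbers up to 15 the key is one byte, so a varint field costs 1 + sovGenerated(v) bytes, a length-delimited field costs 1 + l + sovGenerated(l), and the bare `n += 2` lines are bool fields (one key byte plus a one-byte varint). A standalone sketch of that arithmetic (sov mirrors the sovGenerated helper defined further down in this file; the 300-byte string is an illustrative value):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // sov is the byte count of a varint: (bitlen(x|1) + 6) / 7.
    func sov(x uint64) int {
    	return (bits.Len64(x|1) + 6) / 7
    }

    func main() {
    	boolCost := 1 + sov(1)          // key + one-byte varint = 2
    	strCost := 1 + 300 + sov(300)   // key + payload + length varint = 303
    	fmt.Println(boolCost, strCost)
    }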
+
+func (m *DeploymentConfigSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Strategy.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Triggers != nil {
+		l = m.Triggers.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 1 + sovGenerated(uint64(m.Replicas))
+	if m.RevisionHistoryLimit != nil {
+		n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+	}
+	n += 2
+	n += 2
+	if len(m.Selector) > 0 {
+		for k, v := range m.Selector {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Template != nil {
+		l = m.Template.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+	return n
+}
+
+func (m *DeploymentConfigStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.LatestVersion))
+	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+	n += 1 + sovGenerated(uint64(m.Replicas))
+	n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+	n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+	n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+	if m.Details != nil {
+		l = m.Details.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+	return n
+}
+
+func (m *DeploymentDetails) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Causes) > 0 {
+		for _, e := range m.Causes {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentLog) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *DeploymentLogOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Container)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	if m.SinceSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+	}
+	if m.SinceTime != nil {
+		l = m.SinceTime.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TailLines != nil {
+		n += 1 + sovGenerated(uint64(*m.TailLines))
+	}
+	if m.LimitBytes != nil {
+		n += 1 + sovGenerated(uint64(*m.LimitBytes))
+	}
+	n += 2
+	if m.Version != nil {
+		n += 1 + sovGenerated(uint64(*m.Version))
+	}
+	return n
+}
+
+func (m *DeploymentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	if len(m.ExcludeTriggers) > 0 {
+		for _, s := range m.ExcludeTriggers {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.CustomParams != nil {
+		l = m.CustomParams.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RecreateParams != nil {
+		l = m.RecreateParams.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RollingParams != nil {
+		l = m.RollingParams.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = m.Resources.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.ActiveDeadlineSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+	}
+	return n
+}
+
+func (m *DeploymentTriggerImageChangeParams) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	if len(m.ContainerNames) > 0 {
+		for _, s := range m.ContainerNames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.LastTriggeredImage)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m DeploymentTriggerPolicies) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, e := range m {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentTriggerPolicy) Size() (n int) {
+	if m == nil {
return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ImageChangeParams != nil { + l = m.ImageChangeParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExecNewPodHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LifecycleHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExecNewPod != nil { + l = m.ExecNewPod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TagImages) > 0 { + for _, e := range m.TagImages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RecreateDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mid != nil { + l = m.Mid.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UpdatePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.UpdatePeriodSeconds)) + } + if m.IntervalSeconds != nil { + n += 1 + sovGenerated(uint64(*m.IntervalSeconds)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TagImageHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CustomDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironment := "[]EnvVar{" + for _, f := range this.Environment { + repeatedStringForEnvironment += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvironment += "}" + s := strings.Join([]string{`&CustomDeploymentStrategyParams{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Environment:` + repeatedStringForEnvironment + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageTrigger:` + 
strings.Replace(this.ImageTrigger.String(), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCauseImageTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCauseImageTrigger{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeploymentConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentConfigRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigRollbackSpec", "DeploymentConfigRollbackSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollbackSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 
1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`, + `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`, + `IncludeReplicationMeta:` + fmt.Sprintf("%v", this.IncludeReplicationMeta) + `,`, + `IncludeStrategy:` + fmt.Sprintf("%v", this.IncludeStrategy) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&DeploymentConfigSpec{`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `Triggers:` + strings.Replace(fmt.Sprintf("%v", this.Triggers), "DeploymentTriggerPolicies", "DeploymentTriggerPolicies", 1) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentConfigStatus{`, + `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Details:` + strings.Replace(this.Details.String(), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentDetails) String() string { + if this == nil { + return "nil" + } + repeatedStringForCauses := "[]DeploymentCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" + s := strings.Join([]string{`&DeploymentDetails{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Causes:` + repeatedStringForCauses + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLog{`, + `}`, + }, "") + return s +} +func (this *DeploymentLogOptions) String() 
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v11.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Latest:` + fmt.Sprintf("%v", this.Latest) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `ExcludeTriggers:` + fmt.Sprintf("%v", this.ExcludeTriggers) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `CustomParams:` + strings.Replace(this.CustomParams.String(), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, + `RecreateParams:` + strings.Replace(this.RecreateParams.String(), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, + `RollingParams:` + strings.Replace(this.RollingParams.String(), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerImageChangeParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`, + `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`, + `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerPolicy) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageChangeParams:` + strings.Replace(this.ImageChangeParams.String(), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecNewPodHook) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&ExecNewPodHook{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `}`, + }, "") + return s +} +func (this *LifecycleHook) String() string { + if this == nil { + return "nil" + } + repeatedStringForTagImages := "[]TagImageHook{" + for _, f := range this.TagImages { + repeatedStringForTagImages += strings.Replace(strings.Replace(f.String(), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + "," + } + repeatedStringForTagImages += "}" + s := strings.Join([]string{`&LifecycleHook{`, + `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) + `,`, + `ExecNewPod:` + strings.Replace(this.ExecNewPod.String(), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, + `TagImages:` + repeatedStringForTagImages + `,`, + `}`, + }, "") + return s +} +func (this *RecreateDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Mid:` + strings.Replace(this.Mid.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingDeploymentStrategyParams{`, + `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`, + `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagImageHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImageHook{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Environment = append(m.Environment, v1.EnvVar{}) + if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageTrigger == nil { + m.ImageTrigger = &DeploymentCauseImageTrigger{} + } + if err := m.ImageTrigger.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeploymentConfig{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigRollbackSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigRollbackSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTriggers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeTriggers = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTemplate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeTemplate = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeReplicationMeta", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeReplicationMeta = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeStrategy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeStrategy = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Triggers == nil { + m.Triggers = DeploymentTriggerPolicies{} + } + if err := m.Triggers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Test = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var 
stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &v1.PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestVersion", wireType) + } + m.LatestVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LatestVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &DeploymentDetails{} + } + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, DeploymentCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &v11.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Version = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Latest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Latest = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTriggers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTriggers = append(m.ExcludeTriggers, DeploymentTriggerType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CustomParams == nil { + m.CustomParams = &CustomDeploymentStrategyParams{} + } + if err := m.CustomParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecreateParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RecreateParams == nil { + m.RecreateParams = &RecreateDeploymentStrategyParams{} + } + if err := m.RecreateParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingParams == nil { + m.RollingParams = &RollingDeploymentStrategyParams{} + } + if err := m.RollingParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Automatic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Automatic = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerNames = append(m.ContainerNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerPolicies: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerPolicies: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, DeploymentTriggerPolicy{}) + if err := (*m)[len(*m)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeParams == nil { + 
m.ImageChangeParams = &DeploymentTriggerImageChangeParams{} + } + if err := m.ImageChangeParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecNewPodHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecNewPodHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LifecycleHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LifecycleHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LifecycleHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailurePolicy = LifecycleHookFailurePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecNewPod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExecNewPod == nil { + m.ExecNewPod = &ExecNewPodHook{} + } + if err := m.ExecNewPod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TagImages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.TagImages = append(m.TagImages, TagImageHook{}) + if err := m.TagImages[len(m.TagImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mid == nil { + m.Mid = &LifecycleHook{} + } + if err := m.Mid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpdatePeriodSeconds = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntervalSeconds = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImageHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImageHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImageHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto new file mode 100644 index 0000000000000..6f50fcaf95952 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -0,0 +1,490 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.apps.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". 
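(Editorial sketch, not part of the vendored files.) Every generated Unmarshal function above repeats the same two-step wire decode: read one base-128 varint key, then split it into a field number (wire >> 3) and a wire type (wire & 0x7). The following self-contained Go program illustrates that pattern; decodeKey and its error text are hypothetical names introduced here for illustration, not symbols from the generated package.

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeKey reads one protobuf key (a base-128 varint) from data and
// splits it into field number and wire type, mirroring the loops in the
// generated Unmarshal functions above. n is how many bytes were consumed.
func decodeKey(data []byte) (fieldNum int32, wireType int, n int, err error) {
	var wire uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, 0, errors.New("proto: integer overflow")
		}
		if n >= len(data) {
			return 0, 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		wire |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			break
		}
	}
	return int32(wire >> 3), int(wire & 0x7), n, nil
}

func main() {
	// 0x48 is 9<<3 | 0: field 9, wire type 0 (varint), which the
	// DeploymentConfigSpec unmarshaler above matches as MinReadySeconds.
	fieldNum, wireType, n, err := decodeKey([]byte{0x48, 0x2a})
	if err != nil {
		panic(err)
	}
	fmt.Println(fieldNum, wireType, n) // prints: 9 0 1
}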
+option go_package = "github.com/openshift/api/apps/v1"; + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +message CustomDeploymentStrategyParams { + // image specifies a container image which can carry out a deployment. + optional string image = 1; + + // environment holds the environment which will be given to the container for Image. + repeated .k8s.io.api.core.v1.EnvVar environment = 2; + + // command is optional and overrides CMD in the container Image. + repeated string command = 3; +} + +// DeploymentCause captures information about a particular cause of a deployment. +message DeploymentCause { + // type of the trigger that resulted in the creation of a new deployment + optional string type = 1; + + // imageTrigger contains the image trigger details, if this trigger was fired based on an image change + optional DeploymentCauseImageTrigger imageTrigger = 2; +} + +// DeploymentCauseImageTrigger represents details about the cause of a deployment originating +// from an image change trigger +message DeploymentCauseImageTrigger { + // from is a reference to the changed object which triggered a deployment. The field may have + // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. + optional .k8s.io.api.core.v1.ObjectReference from = 1; +} + +// DeploymentCondition describes the state of a deployment config at a certain point. +message DeploymentCondition { + // type of deployment condition. + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Deployment Configs define the template for a pod and manage deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service. It can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. +// +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// Deprecated: Use deployments or other means for declarative updates for pods instead. +// +openshift:compatibility-gen:level=1 +message DeploymentConfig { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec represents a desired deployment state and how to deploy to it.
+  optional DeploymentConfigSpec spec = 2;
+
+  // status represents the current deployment state.
+  // +optional
+  optional DeploymentConfigStatus status = 3;
+}
+
+// DeploymentConfigList is a collection of deployment configs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentConfigList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a list of deployment configs
+  repeated DeploymentConfig items = 2;
+}
+
+// DeploymentConfigRollback provides the input to rollback generation.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentConfigRollback {
+  // name of the deployment config that will be rolled back.
+  optional string name = 1;
+
+  // updatedAnnotations is a set of new annotations that will be added in the deployment config.
+  map<string, string> updatedAnnotations = 2;
+
+  // spec defines the options to rollback generation.
+  optional DeploymentConfigRollbackSpec spec = 3;
+}
+
+// DeploymentConfigRollbackSpec represents the options for rollback generation.
+message DeploymentConfigRollbackSpec {
+  // from points to a ReplicationController which is a deployment.
+  optional .k8s.io.api.core.v1.ObjectReference from = 1;
+
+  // revision to rollback to. If set to 0, rollback to the last revision.
+  optional int64 revision = 2;
+
+  // includeTriggers specifies whether to include config Triggers.
+  optional bool includeTriggers = 3;
+
+  // includeTemplate specifies whether to include the PodTemplateSpec.
+  optional bool includeTemplate = 4;
+
+  // includeReplicationMeta specifies whether to include the replica count and selector.
+  optional bool includeReplicationMeta = 5;
+
+  // includeStrategy specifies whether to include the deployment Strategy.
+  optional bool includeStrategy = 6;
+}
+
+// DeploymentConfigSpec represents the desired state of the deployment.
+message DeploymentConfigSpec {
+  // strategy describes how a deployment is executed.
+  // +optional
+  optional DeploymentStrategy strategy = 1;
+
+  // minReadySeconds is the minimum number of seconds for which a newly created pod should
+  // be ready without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  optional int32 minReadySeconds = 9;
+
+  // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
+  // are defined, a new deployment can only occur as a result of an explicit client update to the
+  // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
+  // +optional
+  optional DeploymentTriggerPolicies triggers = 2;
+
+  // replicas is the number of desired replicas.
+  // +optional
+  optional int32 replicas = 3;
+
+  // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
+  // This field is a pointer to allow for differentiation between an explicit zero and not specified.
+  // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
+  optional int32 revisionHistoryLimit = 4;
+
+  // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
+  // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
+  // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
+  // +optional
+  optional bool test = 5;
+
+  // paused indicates that the deployment config is paused resulting in no new deployments on template
+  // changes or changes in the template caused by other triggers.
+  optional bool paused = 6;
+
+  // selector is a label query over pods that should match the Replicas count.
+  map<string, string> selector = 7;
+
+  // template is the object that describes the pod that will be created if
+  // insufficient replicas are detected.
+  optional .k8s.io.api.core.v1.PodTemplateSpec template = 8;
+}
+
+// DeploymentConfigStatus represents the current deployment state.
+message DeploymentConfigStatus {
+  // latestVersion is used to determine whether the current deployment associated with a deployment
+  // config is out of sync.
+  optional int64 latestVersion = 1;
+
+  // observedGeneration is the most recent generation observed by the deployment config controller.
+  optional int64 observedGeneration = 2;
+
+  // replicas is the total number of pods targeted by this deployment config.
+  optional int32 replicas = 3;
+
+  // updatedReplicas is the total number of non-terminated pods targeted by this deployment config
+  // that have the desired template spec.
+  optional int32 updatedReplicas = 4;
+
+  // availableReplicas is the total number of available pods targeted by this deployment config.
+  optional int32 availableReplicas = 5;
+
+  // unavailableReplicas is the total number of unavailable pods targeted by this deployment config.
+  optional int32 unavailableReplicas = 6;
+
+  // details are the reasons for the update to this deployment config.
+  // This could be based on a change made by the user or caused by an automatic trigger
+  optional DeploymentDetails details = 7;
+
+  // conditions represents the latest available observations of a deployment config's current state.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated DeploymentCondition conditions = 8;
+
+  // Total number of ready pods targeted by this deployment.
+  optional int32 readyReplicas = 9;
+}
+
+// DeploymentDetails captures information about the causes of a deployment.
+message DeploymentDetails {
+  // message is the user specified change message, if this deployment was triggered manually by the user
+  optional string message = 1;
+
+  // causes are extended data associated with all the causes for creating a new deployment
+  repeated DeploymentCause causes = 2;
+}
+
+// DeploymentLog represents the logs for a deployment
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentLog {
+}
+
+// DeploymentLogOptions is the REST options for a deployment log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentLogOptions {
+  // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // Return previous deployment logs. Defaults to false. + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // nowait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // version of the deployment for which to view logs. + optional int64 version = 10; +} + +// DeploymentRequest is a request to a deployment config for a new deployment. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message DeploymentRequest { + // name of the deployment config for requesting a new deployment. + optional string name = 1; + + // latest will update the deployment config with the latest state from all triggers. + optional bool latest = 2; + + // force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. + optional bool force = 3; + + // excludeTriggers instructs the instantiator to avoid processing the specified triggers. + // This field overrides the triggers from latest and allows clients to control specific + // logic. This field is ignored if not specified. + repeated string excludeTriggers = 4; +} + +// DeploymentStrategy describes how to perform a deployment. +message DeploymentStrategy { + // type is the name of a deployment strategy. + // +optional + optional string type = 1; + + // customParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + optional CustomDeploymentStrategyParams customParams = 2; + + // recreateParams are the input to the Recreate deployment strategy. + optional RecreateDeploymentStrategyParams recreateParams = 3; + + // rollingParams are the input to the Rolling deployment strategy. 
+  optional RollingDeploymentStrategyParams rollingParams = 4;
+
+  // resources contains resource requirements to execute the deployment and any hooks.
+  optional .k8s.io.api.core.v1.ResourceRequirements resources = 5;
+
+  // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+  map<string, string> labels = 6;
+
+  // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+  map<string, string> annotations = 7;
+
+  // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
+  // config may be active on a node before the system actively tries to terminate them.
+  optional int64 activeDeadlineSeconds = 8;
+}
+
+// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
+message DeploymentTriggerImageChangeParams {
+  // automatic means that the detection of a new tag value should result in an image update
+  // inside the pod template.
+  optional bool automatic = 1;
+
+  // containerNames is used to restrict tag updates to the specified set of container names in a pod.
+  // If multiple triggers point to the same containers, the resulting behavior is undefined, and if
+  // ContainerNames does not point to a valid container, the trigger will be ignored. Future API
+  // versions will make both of these cases validation errors.
+  repeated string containerNames = 2;
+
+  // from is a reference to an image stream tag to watch for changes. From.Name is the only
+  // required subfield - if From.Namespace is blank, the namespace of the current deployment
+  // trigger will be used.
+  optional .k8s.io.api.core.v1.ObjectReference from = 3;
+
+  // lastTriggeredImage is the last image to be triggered.
+  optional string lastTriggeredImage = 4;
+}
+
+// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message DeploymentTriggerPolicies {
+  // items, if empty, will result in an empty slice
+
+  repeated DeploymentTriggerPolicy items = 1;
+}
+
+// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
+message DeploymentTriggerPolicy {
+  // type of the trigger
+  optional string type = 1;
+
+  // imageChangeParams represents the parameters for the ImageChange trigger.
+  optional DeploymentTriggerImageChangeParams imageChangeParams = 2;
+}
+
+// ExecNewPodHook is a hook implementation which runs a command in a new pod
+// based on the specified container which is assumed to be part of the
+// deployment template.
+message ExecNewPodHook {
+  // command is the action command and its arguments.
+  repeated string command = 1;
+
+  // env is a set of environment variables to supply to the hook pod's container.
+  repeated .k8s.io.api.core.v1.EnvVar env = 2;
+
+  // containerName is the name of a container in the deployment pod template
+  // whose container image will be used for the hook pod's container.
+  optional string containerName = 3;
+
+  // volumes is a list of named volumes from the pod template which should be
+  // copied to the hook pod. Volume names not found in the pod spec are ignored.
+  // An empty list means no volumes will be copied.
+  repeated string volumes = 4;
+}
+
+// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
+message LifecycleHook {
+  // failurePolicy specifies what action to take if the hook fails.
+  optional string failurePolicy = 1;
+
+  // execNewPod specifies the options for a lifecycle hook backed by a pod.
+  optional ExecNewPodHook execNewPod = 2;
+
+  // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
+  repeated TagImageHook tagImages = 3;
+}
+
+// RecreateDeploymentStrategyParams are the input to the Recreate deployment
+// strategy.
+message RecreateDeploymentStrategyParams {
+  // timeoutSeconds is the time to wait for updates before giving up. If the
+  // value is nil, a default will be used.
+  optional int64 timeoutSeconds = 1;
+
+  // pre is a lifecycle hook which is executed before the strategy manipulates
+  // the deployment. All LifecycleHookFailurePolicy values are supported.
+  optional LifecycleHook pre = 2;
+
+  // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
+  // pod is created. All LifecycleHookFailurePolicy values are supported.
+  optional LifecycleHook mid = 3;
+
+  // post is a lifecycle hook which is executed after the strategy has
+  // finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
+  optional LifecycleHook post = 4;
+}
+
+// RollingDeploymentStrategyParams are the input to the Rolling deployment
+// strategy.
+message RollingDeploymentStrategyParams {
+  // updatePeriodSeconds is the time to wait between individual pod updates.
+  // If the value is nil, a default will be used.
+  optional int64 updatePeriodSeconds = 1;
+
+  // intervalSeconds is the time to wait between polling deployment status
+  // after update. If the value is nil, a default will be used.
+  optional int64 intervalSeconds = 2;
+
+  // timeoutSeconds is the time to wait for updates before giving up. If the
+  // value is nil, a default will be used.
+  optional int64 timeoutSeconds = 3;
+
+  // maxUnavailable is the maximum number of pods that can be unavailable
+  // during the update. Value can be an absolute number (ex: 5) or a
+  // percentage of total pods at the start of update (ex: 10%). Absolute
+  // number is calculated from percentage by rounding down.
+  //
+  // This cannot be 0 if MaxSurge is 0. By default, 25% is used.
+  //
+  // Example: when this is set to 30%, the old RC can be scaled down by 30%
+  // immediately when the rolling update starts. Once new pods are ready, old
+  // RC can be scaled down further, followed by scaling up the new RC,
+  // ensuring that at least 70% of original number of pods are available at
+  // all times during the update.
+  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4;
+
+  // maxSurge is the maximum number of pods that can be scheduled above the
+  // original number of pods. Value can be an absolute number (ex: 5) or a
+  // percentage of total pods at the start of the update (ex: 10%). Absolute
+  // number is calculated from percentage by rounding up.
+  //
+  // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
+  //
+  // Example: when this is set to 30%, the new RC can be scaled up by 30%
+  // immediately when the rolling update starts. Once old pods have been
+  // killed, new RC can be scaled up further, ensuring that total number of
+  // pods running at any time during the update is at most 130% of original
+  // pods.
+  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5;
+
+  // pre is a lifecycle hook which is executed before the deployment process
+  // begins. All LifecycleHookFailurePolicy values are supported.
+ optional LifecycleHook pre = 7; + + // post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + optional LifecycleHook post = 8; +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +message TagImageHook { + // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + optional string containerName = 1; + + // to is the target ImageStreamTag to set the container's image onto. + optional .k8s.io.api.core.v1.ObjectReference to = 2; +} + diff --git a/vendor/github.com/openshift/api/apps/v1/legacy.go b/vendor/github.com/openshift/api/apps/v1/legacy.go new file mode 100644 index 0000000000000..c8fa0ed999f39 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go new file mode 100644 index 0000000000000..0c1e47e6d46ef --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/register.go @@ -0,0 +1,45 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "apps.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&DeploymentConfig{},
+		&DeploymentConfigList{},
+		&DeploymentConfigRollback{},
+		&DeploymentRequest{},
+		&DeploymentLog{},
+		&DeploymentLogOptions{},
+		&extensionsv1beta1.Scale{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go
new file mode 100644
index 0000000000000..619c30e828e9c
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/types.go
@@ -0,0 +1,537 @@
+package v1
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=DeploymentRequest
+// +genclient:method=Rollback,verb=create,subresource=rollback,input=DeploymentConfigRollback
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// Deployment Configs define the template for a pod and manage deploying new images or configuration changes.
+// A single deployment configuration is usually analogous to a single micro-service. It can support many different
+// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as
+// well as pre- and post-deployment hooks. Each individual deployment is represented as a replication controller.
+//
+// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed.
+// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment
+// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment
+// is triggered by any means.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// Deprecated: Use deployments or other means for declarative updates for pods instead.
+// +openshift:compatibility-gen:level=1
+type DeploymentConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec represents a desired deployment state and how to deploy to it.
+	Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// status represents the current deployment state.
+	// +optional
+	Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentConfigSpec represents the desired state of the deployment.
+type DeploymentConfigSpec struct {
+	// strategy describes how a deployment is executed.
+ // +optional + Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` + + // minReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. + // +optional + Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` + + // replicas is the number of desired replicas. + // +optional + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + + // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` + + // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + // +optional + Test bool `json:"test" protobuf:"varint,5,opt,name=test"` + + // paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. + Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` + + // selector is a label query over pods that should match the Replicas count. + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. + Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` +} + +// DeploymentStrategy describes how to perform a deployment. +type DeploymentStrategy struct { + // type is the name of a deployment strategy. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // customParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` + // recreateParams are the input to the Recreate deployment strategy. + RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` + // rollingParams are the input to the Rolling deployment strategy. 
+ RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` + + // resources contains resource requirements to execute the deployment and any hooks. + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` + // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` + // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"` + + // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // config may be active on a node before the system actively tries to terminate them. + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"` +} + +// DeploymentStrategyType refers to a specific DeploymentStrategy implementation. +type DeploymentStrategyType string + +const ( + // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default. + DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate" + // DeploymentStrategyTypeCustom is a user defined strategy. + DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom" + // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater. + DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling" +) + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +type CustomDeploymentStrategyParams struct { + // image specifies a container image which can carry out a deployment. + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // environment holds the environment which will be given to the container for Image. + Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` + // command is optional and overrides CMD in the container Image. + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +type RecreateDeploymentStrategyParams struct { + // timeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` + // pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"` + // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"` + // post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +type RollingDeploymentStrategyParams struct { + // updatePeriodSeconds is the time to wait between individual pod updates. 
+	// If the value is nil, a default will be used.
+	UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"`
+	// intervalSeconds is the time to wait between polling deployment status
+	// after update. If the value is nil, a default will be used.
+	IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"`
+	// timeoutSeconds is the time to wait for updates before giving up. If the
+	// value is nil, a default will be used.
+	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
+	// maxUnavailable is the maximum number of pods that can be unavailable
+	// during the update. Value can be an absolute number (ex: 5) or a
+	// percentage of total pods at the start of update (ex: 10%). Absolute
+	// number is calculated from percentage by rounding down.
+	//
+	// This cannot be 0 if MaxSurge is 0. By default, 25% is used.
+	//
+	// Example: when this is set to 30%, the old RC can be scaled down by 30%
+	// immediately when the rolling update starts. Once new pods are ready, old
+	// RC can be scaled down further, followed by scaling up the new RC,
+	// ensuring that at least 70% of original number of pods are available at
+	// all times during the update.
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"`
+	// maxSurge is the maximum number of pods that can be scheduled above the
+	// original number of pods. Value can be an absolute number (ex: 5) or a
+	// percentage of total pods at the start of the update (ex: 10%). Absolute
+	// number is calculated from percentage by rounding up.
+	//
+	// This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
+	//
+	// Example: when this is set to 30%, the new RC can be scaled up by 30%
+	// immediately when the rolling update starts. Once old pods have been
+	// killed, new RC can be scaled up further, ensuring that total number of
+	// pods running at any time during the update is at most 130% of original
+	// pods.
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"`
+	// pre is a lifecycle hook which is executed before the deployment process
+	// begins. All LifecycleHookFailurePolicy values are supported.
+	Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"`
+	// post is a lifecycle hook which is executed after the strategy has
+	// finished all deployment logic. All LifecycleHookFailurePolicy values
+	// are supported.
+	Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"`
+}
+
+// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
+type LifecycleHook struct {
+	// failurePolicy specifies what action to take if the hook fails.
+	FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"`
+
+	// execNewPod specifies the options for a lifecycle hook backed by a pod.
+	ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"`
+
+	// tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
+	TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"`
+}
+
+// LifecycleHookFailurePolicy describes possible actions to take if a hook fails.
+type LifecycleHookFailurePolicy string
+
+const (
+	// LifecycleHookFailurePolicyRetry means retry the hook until it succeeds.
+	LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry"
+	// LifecycleHookFailurePolicyAbort means abort the deployment.
+	LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort"
+	// LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment.
+	LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore"
+)
+
+// ExecNewPodHook is a hook implementation which runs a command in a new pod
+// based on the specified container which is assumed to be part of the
+// deployment template.
+type ExecNewPodHook struct {
+	// command is the action command and its arguments.
+	Command []string `json:"command" protobuf:"bytes,1,rep,name=command"`
+	// env is a set of environment variables to supply to the hook pod's container.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
+	// containerName is the name of a container in the deployment pod template
+	// whose container image will be used for the hook pod's container.
+	ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
+	// volumes is a list of named volumes from the pod template which should be
+	// copied to the hook pod. Volume names not found in the pod spec are ignored.
+	// An empty list means no volumes will be copied.
+	Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
+}
+
+// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
+type TagImageHook struct {
+	// containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
+	// container this value will be defaulted to the name of that container.
+	ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
+	// to is the target ImageStreamTag to set the container's image onto.
+	To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"`
+}
+
+// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type DeploymentTriggerPolicies []DeploymentTriggerPolicy
+
+func (t DeploymentTriggerPolicies) String() string {
+	return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t))
+}
+
+// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
+type DeploymentTriggerPolicy struct {
+	// type of the trigger
+	Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
+	// imageChangeParams represents the parameters for the ImageChange trigger.
+	ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"`
+}
+
+// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation.
+type DeploymentTriggerType string
+
+const (
+	// DeploymentTriggerOnImageChange will create new deployments in response to updated tags from
+	// a container image repository.
+	DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange"
+	// DeploymentTriggerOnConfigChange will create new deployments in response to changes to
+	// the ControllerTemplate of a DeploymentConfig.
+	DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange"
+)
+
+// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
+type DeploymentTriggerImageChangeParams struct {
+	// automatic means that the detection of a new tag value should result in an image update
+	// inside the pod template.
+	Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"`
+	// containerNames is used to restrict tag updates to the specified set of container names in a pod.
+	// If multiple triggers point to the same containers, the resulting behavior is undefined, and if
+	// ContainerNames does not point to a valid container, the trigger will be ignored. Future API
+	// versions will make both of these cases validation errors.
+	ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"`
+	// from is a reference to an image stream tag to watch for changes. From.Name is the only
+	// required subfield - if From.Namespace is blank, the namespace of the current deployment
+	// trigger will be used.
+	From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"`
+	// lastTriggeredImage is the last image to be triggered.
+	LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"`
+}
+
+// DeploymentConfigStatus represents the current deployment state.
+type DeploymentConfigStatus struct {
+	// latestVersion is used to determine whether the current deployment associated with a deployment
+	// config is out of sync.
+	LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"`
+	// observedGeneration is the most recent generation observed by the deployment config controller.
+	ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"`
+	// replicas is the total number of pods targeted by this deployment config.
+	Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
+	// updatedReplicas is the total number of non-terminated pods targeted by this deployment config
+	// that have the desired template spec.
+	UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"`
+	// availableReplicas is the total number of available pods targeted by this deployment config.
+	AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"`
+	// unavailableReplicas is the total number of unavailable pods targeted by this deployment config.
+	UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"`
+	// details are the reasons for the update to this deployment config.
+	// This could be based on a change made by the user or caused by an automatic trigger
+	Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"`
+	// conditions represents the latest available observations of a deployment config's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"`
+	// Total number of ready pods targeted by this deployment.
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"`
+}
+
+// DeploymentDetails captures information about the causes of a deployment.
+type DeploymentDetails struct {
+	// message is the user specified change message, if this deployment was triggered manually by the user
+	Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+	// causes are extended data associated with all the causes for creating a new deployment
+	Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
+}
+
+// DeploymentCause captures information about a particular cause of a deployment.
+type DeploymentCause struct {
+	// type of the trigger that resulted in the creation of a new deployment
+	Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
+	// imageTrigger contains the image trigger details, if this trigger was fired based on an image change
+	ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
+}
+
+// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
+// from an image change trigger
+type DeploymentCauseImageTrigger struct {
+	// from is a reference to the changed object which triggered a deployment. The field may have
+	// the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
+	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a DeploymentConfig.
+const (
+	// DeploymentAvailable means the DeploymentConfig is available, i.e. at least the minimum available
+	// replicas required (dc.spec.replicas in case the DeploymentConfig is of Recreate type,
+	// dc.spec.replicas - dc.spec.strategy.rollingParams.maxUnavailable in case it's Rolling) are up and
+	// running for at least dc.spec.minReadySeconds.
+	DeploymentAvailable DeploymentConditionType = "Available"
+	// DeploymentProgressing is:
+	// * True: the DeploymentConfig has been successfully deployed or is in the process of being deployed.
+	//   The two different states can be determined by looking at the Reason of the Condition.
+	//   For example, a complete DC will have {Status: True, Reason: NewReplicationControllerAvailable}
+	//   and a DC in the middle of a rollout {Status: True, Reason: ReplicationControllerUpdated}.
+	//   TODO: Represent a successfully deployed DC by using something else for Status like Unknown?
+	// * False: the DeploymentConfig has failed to deploy its latest version.
+	//
+	// This condition is purely informational and depends on the dc.spec.strategy.*params.timeoutSeconds
+	// field, which is responsible for the time in seconds to wait for a rollout before deciding that
+	// no progress can be made, thus the rollout is aborted.
+	//
+	// Progress for a DeploymentConfig is considered when new pods scale up or old pods scale down.
+	DeploymentProgressing DeploymentConditionType = "Progressing"
+	// DeploymentReplicaFailure is added in a deployment config when one of its pods
+	// fails to be created or deleted.
+	DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment config at a certain point.
+type DeploymentCondition struct {
+	// type of deployment condition.
+	Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+	// status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // The last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentConfigList is a collection of deployment configs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of deployment configs + Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentConfigRollback provides the input to rollback generation. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentConfigRollback struct { + metav1.TypeMeta `json:",inline"` + // name of the deployment config that will be rolled back. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // updatedAnnotations is a set of new annotations that will be added in the deployment config. + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // spec defines the options to rollback generation. + Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"` +} + +// DeploymentConfigRollbackSpec represents the options for rollback generation. +type DeploymentConfigRollbackSpec struct { + // from points to a ReplicationController which is a deployment. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + // revision to rollback to. If set to 0, rollback to the last revision. + Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"` + // includeTriggers specifies whether to include config Triggers. + IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"` + // includeTemplate specifies whether to include the PodTemplateSpec. + IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"` + // includeReplicationMeta specifies whether to include the replica count and selector. 
+ IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"` + // includeStrategy specifies whether to include the deployment Strategy. + IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentRequest is a request to a deployment config for a new deployment. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentRequest struct { + metav1.TypeMeta `json:",inline"` + // name of the deployment config for requesting a new deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // latest will update the deployment config with the latest state from all triggers. + Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"` + // force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. + Force bool `json:"force" protobuf:"varint,3,opt,name=force"` + // excludeTriggers instructs the instantiator to avoid processing the specified triggers. + // This field overrides the triggers from latest and allows clients to control specific + // logic. This field is ignored if not specified. + ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentLog represents the logs for a deployment +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentLog struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentLogOptions is the REST options for a deployment log +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // follow if true indicates that the build log should be streamed until + // the build terminates. + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous deployment logs. Defaults to false. + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. 
+ // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // nowait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` + + // version of the deployment for which to view logs. + Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` +} diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..11c22a80f3601 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,682 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStrategyParams) { + *out = *in + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploymentStrategyParams. +func (in *CustomDeploymentStrategyParams) DeepCopy() *CustomDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(CustomDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) { + *out = *in + if in.ImageTrigger != nil { + in, out := &in.ImageTrigger, &out.ImageTrigger + *out = new(DeploymentCauseImageTrigger) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCause. +func (in *DeploymentCause) DeepCopy() *DeploymentCause { + if in == nil { + return nil + } + out := new(DeploymentCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCauseImageTrigger) DeepCopyInto(out *DeploymentCauseImageTrigger) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCauseImageTrigger. +func (in *DeploymentCauseImageTrigger) DeepCopy() *DeploymentCauseImageTrigger { + if in == nil { + return nil + } + out := new(DeploymentCauseImageTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig. +func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { + if in == nil { + return nil + } + out := new(DeploymentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeploymentConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList. +func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { + if in == nil { + return nil + } + out := new(DeploymentConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollback) DeepCopyInto(out *DeploymentConfigRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollback. +func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback { + if in == nil { + return nil + } + out := new(DeploymentConfigRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollbackSpec) DeepCopyInto(out *DeploymentConfigRollbackSpec) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollbackSpec. +func (in *DeploymentConfigRollbackSpec) DeepCopy() *DeploymentConfigRollbackSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigRollbackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { + *out = *in + in.Strategy.DeepCopyInto(&out.Strategy) + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(corev1.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec. +func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(DeploymentDetails) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus. +func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus { + if in == nil { + return nil + } + out := new(DeploymentConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]DeploymentCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentDetails. +func (in *DeploymentDetails) DeepCopy() *DeploymentDetails { + if in == nil { + return nil + } + out := new(DeploymentDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLog) DeepCopyInto(out *DeploymentLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLog. +func (in *DeploymentLog) DeepCopy() *DeploymentLog { + if in == nil { + return nil + } + out := new(DeploymentLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLogOptions. +func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions { + if in == nil { + return nil + } + out := new(DeploymentLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentRequest) DeepCopyInto(out *DeploymentRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExcludeTriggers != nil { + in, out := &in.ExcludeTriggers, &out.ExcludeTriggers + *out = make([]DeploymentTriggerType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRequest. +func (in *DeploymentRequest) DeepCopy() *DeploymentRequest { + if in == nil { + return nil + } + out := new(DeploymentRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.CustomParams != nil { + in, out := &in.CustomParams, &out.CustomParams + *out = new(CustomDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + if in.RecreateParams != nil { + in, out := &in.RecreateParams, &out.RecreateParams + *out = new(RecreateDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + if in.RollingParams != nil { + in, out := &in.RollingParams, &out.RollingParams + *out = new(RollingDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) { + *out = *in + if in.ContainerNames != nil { + in, out := &in.ContainerNames, &out.ContainerNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerImageChangeParams. +func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImageChangeParams { + if in == nil { + return nil + } + out := new(DeploymentTriggerImageChangeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { + { + in := &in + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies. 
+func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) { + *out = *in + if in.ImageChangeParams != nil { + in, out := &in.ImageChangeParams, &out.ImageChangeParams + *out = new(DeploymentTriggerImageChangeParams) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicy. +func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecNewPodHook. +func (in *ExecNewPodHook) DeepCopy() *ExecNewPodHook { + if in == nil { + return nil + } + out := new(ExecNewPodHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { + *out = *in + if in.ExecNewPod != nil { + in, out := &in.ExecNewPod, &out.ExecNewPod + *out = new(ExecNewPodHook) + (*in).DeepCopyInto(*out) + } + if in.TagImages != nil { + in, out := &in.TagImages, &out.TagImages + *out = make([]TagImageHook, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. +func (in *LifecycleHook) DeepCopy() *LifecycleHook { + if in == nil { + return nil + } + out := new(LifecycleHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Mid != nil { + in, out := &in.Mid, &out.Mid + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Post != nil { + in, out := &in.Post, &out.Post + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecreateDeploymentStrategyParams. 
+func (in *RecreateDeploymentStrategyParams) DeepCopy() *RecreateDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RecreateDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentStrategyParams) { + *out = *in + if in.UpdatePeriodSeconds != nil { + in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds + *out = new(int64) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Post != nil { + in, out := &in.Post, &out.Post + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingDeploymentStrategyParams. +func (in *RollingDeploymentStrategyParams) DeepCopy() *RollingDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RollingDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImageHook) DeepCopyInto(out *TagImageHook) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImageHook. +func (in *TagImageHook) DeepCopy() *TagImageHook { + if in == nil { + return nil + } + out := new(TagImageHook) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..55b53c5daf2ef --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,284 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CustomDeploymentStrategyParams = map[string]string{ + "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", + "image": "image specifies a container image which can carry out a deployment.", + "environment": "environment holds the environment which will be given to the container for Image.", + "command": "command is optional and overrides CMD in the container Image.", +} + +func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_CustomDeploymentStrategyParams +} + +var map_DeploymentCause = map[string]string{ + "": "DeploymentCause captures information about a particular cause of a deployment.", + "type": "type of the trigger that resulted in the creation of a new deployment", + "imageTrigger": "imageTrigger contains the image trigger details, if this trigger was fired based on an image change", +} + +func (DeploymentCause) SwaggerDoc() map[string]string { + return map_DeploymentCause +} + +var map_DeploymentCauseImageTrigger = map[string]string{ + "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger", + "from": "from is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", +} + +func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { + return map_DeploymentCauseImageTrigger +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment config at a certain point.", + "type": "type of deployment condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentConfig = map[string]string{ + "": "Deployment Configs define the template for a pod and manage deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec represents a desired deployment state and how to deploy to it.", + "status": "status represents the current deployment state.", +} + +func (DeploymentConfig) SwaggerDoc() map[string]string { + return map_DeploymentConfig +} + +var map_DeploymentConfigList = map[string]string{ + "": "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of deployment configs", +} + +func (DeploymentConfigList) SwaggerDoc() map[string]string { + return map_DeploymentConfigList +} + +var map_DeploymentConfigRollback = map[string]string{ + "": "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "name": "name of the deployment config that will be rolled back.", + "updatedAnnotations": "updatedAnnotations is a set of new annotations that will be added in the deployment config.", + "spec": "spec defines the options to rollback generation.", +} + +func (DeploymentConfigRollback) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollback +} + +var map_DeploymentConfigRollbackSpec = map[string]string{ + "": "DeploymentConfigRollbackSpec represents the options for rollback generation.", + "from": "from points to a ReplicationController which is a deployment.", + "revision": "revision to rollback to. If set to 0, rollback to the last revision.", + "includeTriggers": "includeTriggers specifies whether to include config Triggers.", + "includeTemplate": "includeTemplate specifies whether to include the PodTemplateSpec.", + "includeReplicationMeta": "includeReplicationMeta specifies whether to include the replica count and selector.", + "includeStrategy": "includeStrategy specifies whether to include the deployment Strategy.", +} + +func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollbackSpec +} + +var map_DeploymentConfigSpec = map[string]string{ + "": "DeploymentConfigSpec represents the desired state of the deployment.", + "strategy": "strategy describes how a deployment is executed.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "triggers": "triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", + "replicas": "replicas is the number of desired replicas.", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. 
(This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "test": "test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "paused": "paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "selector": "selector is a label query over pods that should match the Replicas count.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected.", +} + +func (DeploymentConfigSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigSpec +} + +var map_DeploymentConfigStatus = map[string]string{ + "": "DeploymentConfigStatus represents the current deployment state.", + "latestVersion": "latestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "observedGeneration": "observedGeneration is the most recent generation observed by the deployment config controller.", + "replicas": "replicas is the total number of pods targeted by this deployment config.", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "availableReplicas": "availableReplicas is the total number of available pods targeted by this deployment config.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "details": "details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "conditions represents the latest available observations of a deployment config's current state.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", +} + +func (DeploymentConfigStatus) SwaggerDoc() map[string]string { + return map_DeploymentConfigStatus +} + +var map_DeploymentDetails = map[string]string{ + "": "DeploymentDetails captures information about the causes of a deployment.", + "message": "message is the user specified change message, if this deployment was triggered manually by the user", + "causes": "causes are extended data associated with all the causes for creating a new deployment", +} + +func (DeploymentDetails) SwaggerDoc() map[string]string { + return map_DeploymentDetails +} + +var map_DeploymentLog = map[string]string{ + "": "DeploymentLog represents the logs for a deployment\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (DeploymentLog) SwaggerDoc() map[string]string { + return map_DeploymentLog +} + +var map_DeploymentLogOptions = map[string]string{ + "": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "container": "The container for which to stream logs. 
Defaults to only container if there is one container in the pod.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "Return previous deployment logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "nowait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", + "version": "version of the deployment for which to view logs.", +} + +func (DeploymentLogOptions) SwaggerDoc() map[string]string { + return map_DeploymentLogOptions +} + +var map_DeploymentRequest = map[string]string{ + "": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "name": "name of the deployment config for requesting a new deployment.", + "latest": "latest will update the deployment config with the latest state from all triggers.", + "force": "force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", + "excludeTriggers": "excludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. 
This field is ignored if not specified.", +} + +func (DeploymentRequest) SwaggerDoc() map[string]string { + return map_DeploymentRequest +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to perform a deployment.", + "type": "type is the name of a deployment strategy.", + "customParams": "customParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", + "recreateParams": "recreateParams are the input to the Recreate deployment strategy.", + "rollingParams": "rollingParams are the input to the Rolling deployment strategy.", + "resources": "resources contains resource requirements to execute the deployment and any hooks.", + "labels": "labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "annotations": "annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "activeDeadlineSeconds": "activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_DeploymentTriggerImageChangeParams = map[string]string{ + "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.", + "automatic": "automatic means that the detection of a new tag value should result in an image update inside the pod template.", + "containerNames": "containerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", + "from": "from is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", + "lastTriggeredImage": "lastTriggeredImage is the last image to be triggered.", +} + +func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { + return map_DeploymentTriggerImageChangeParams +} + +var map_DeploymentTriggerPolicy = map[string]string{ + "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.", + "type": "type of the trigger", + "imageChangeParams": "imageChangeParams represents the parameters for the ImageChange trigger.", +} + +func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { + return map_DeploymentTriggerPolicy +} + +var map_ExecNewPodHook = map[string]string{ + "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", + "command": "command is the action command and its arguments.", + "env": "env is a set of environment variables to supply to the hook pod's container.", + "containerName": "containerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", + "volumes": "volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. 
An empty list means no volumes will be copied.", +} + +func (ExecNewPodHook) SwaggerDoc() map[string]string { + return map_ExecNewPodHook +} + +var map_LifecycleHook = map[string]string{ + "": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.", + "failurePolicy": "failurePolicy specifies what action to take if the hook fails.", + "execNewPod": "execNewPod specifies the options for a lifecycle hook backed by a pod.", + "tagImages": "tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", +} + +func (LifecycleHook) SwaggerDoc() map[string]string { + return map_LifecycleHook +} + +var map_RecreateDeploymentStrategyParams = map[string]string{ + "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.", + "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "pre": "pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", + "mid": "mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RecreateDeploymentStrategyParams +} + +var map_RollingDeploymentStrategyParams = map[string]string{ + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "updatePeriodSeconds": "updatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "intervalSeconds": "intervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "maxSurge": "maxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of original pods.", + "pre": "pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RollingDeploymentStrategyParams +} + +var map_TagImageHook = map[string]string{ + "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.", + "containerName": "containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", + "to": "to is the target ImageStreamTag to set the container's image onto.", +} + +func (TagImageHook) SwaggerDoc() map[string]string { + return map_TagImageHook +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go new file mode 100644 index 0000000000000..b3e4de501068e --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentConfig) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentConfig) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentConfig) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentConfigList) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *DeploymentConfigList) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentConfigList) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentConfigRollback) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentConfigRollback) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentConfigRollback) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentLog) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentLog) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentLog) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentLogOptions) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *DeploymentLogOptions) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentLogOptions) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentRequest) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentRequest) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentRequest) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} diff --git a/vendor/github.com/openshift/api/authorization/v1/Makefile b/vendor/github.com/openshift/api/authorization/v1/Makefile new file mode 100644 index 0000000000000..1e47c9fd97c15 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="authorization.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/authorization/v1/codec.go b/vendor/github.com/openshift/api/authorization/v1/codec.go new file mode 100644 index 0000000000000..61f1f9f514666 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/codec.go @@ -0,0 +1,139 @@ +package v1 + +import ( + "github.com/openshift/api/pkg/serialization" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.NestedObjectDecoder = &PolicyRule{} +var _ runtime.NestedObjectEncoder = &PolicyRule{} + +func (c *PolicyRule) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + serialization.DecodeNestedRawExtensionOrUnknown(d, &c.AttributeRestrictions) + return nil +} +func (c *PolicyRule) EncodeNestedObjects(e runtime.Encoder) error { + return serialization.EncodeNestedRawExtension(e, &c.AttributeRestrictions) +} + +var _ runtime.NestedObjectDecoder = &SelfSubjectRulesReview{} +var _ runtime.NestedObjectEncoder = &SelfSubjectRulesReview{} + +func (c *SelfSubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Status.Rules { + c.Status.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *SelfSubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Status.Rules { + if err := 
c.Status.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &SubjectRulesReview{} +var _ runtime.NestedObjectEncoder = &SubjectRulesReview{} + +func (c *SubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Status.Rules { + c.Status.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *SubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Status.Rules { + if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &ClusterRole{} +var _ runtime.NestedObjectEncoder = &ClusterRole{} + +func (c *ClusterRole) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Rules { + c.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *ClusterRole) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Rules { + if err := c.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &Role{} +var _ runtime.NestedObjectEncoder = &Role{} + +func (c *Role) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Rules { + c.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *Role) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Rules { + if err := c.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &ClusterRoleList{} +var _ runtime.NestedObjectEncoder = &ClusterRoleList{} + +func (c *ClusterRoleList) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Items { + c.Items[i].DecodeNestedObjects(d) + } + return nil +} +func (c *ClusterRoleList) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Items { + if err := c.Items[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &RoleList{} +var _ runtime.NestedObjectEncoder = &RoleList{} + +func (c *RoleList) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Items { + c.Items[i].DecodeNestedObjects(d) + } + return nil +} +func (c *RoleList) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Items { + if err := c.Items[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/doc.go b/vendor/github.com/openshift/api/authorization/v1/doc.go new file mode 100644 index 0000000000000..a66741dce6b5d --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/authorization/apis/authorization +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=authorization.openshift.io +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.pb.go b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go new file mode 100644 index 0000000000000..c52cebf07ac09 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go @@ -0,0 +1,9090 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/authorization/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v12 "k8s.io/api/core/v1" + v11 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Action) Reset() { *m = Action{} } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_Action.Merge(m, src) +} +func (m *Action) XXX_Size() int { + return m.Size() +} +func (m *Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo + +func (m *GroupRestriction) Reset() { *m = GroupRestriction{} } +func (*GroupRestriction) ProtoMessage() {} +func (*GroupRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{5} +} +func (m *GroupRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupRestriction.Merge(m, src) +} +func (m *GroupRestriction) XXX_Size() int { + return m.Size() +} +func (m *GroupRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_GroupRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupRestriction proto.InternalMessageInfo + +func (m *IsPersonalSubjectAccessReview) Reset() { *m = IsPersonalSubjectAccessReview{} } +func (*IsPersonalSubjectAccessReview) ProtoMessage() {} +func (*IsPersonalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{6} +} +func (m *IsPersonalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsPersonalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IsPersonalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsPersonalSubjectAccessReview.Merge(m, src) +} +func (m *IsPersonalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m 
*IsPersonalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_IsPersonalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_IsPersonalSubjectAccessReview proto.InternalMessageInfo + +func (m *LocalResourceAccessReview) Reset() { *m = LocalResourceAccessReview{} } +func (*LocalResourceAccessReview) ProtoMessage() {} +func (*LocalResourceAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{7} +} +func (m *LocalResourceAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalResourceAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalResourceAccessReview.Merge(m, src) +} +func (m *LocalResourceAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalResourceAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalResourceAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalResourceAccessReview proto.InternalMessageInfo + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{8} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo + +func (m *NamedClusterRole) Reset() { *m = NamedClusterRole{} } +func (*NamedClusterRole) ProtoMessage() {} +func (*NamedClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{9} +} +func (m *NamedClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedClusterRole.Merge(m, src) +} +func (m *NamedClusterRole) XXX_Size() int { + return m.Size() +} +func (m *NamedClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_NamedClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedClusterRole proto.InternalMessageInfo + +func (m *NamedClusterRoleBinding) Reset() { *m = NamedClusterRoleBinding{} } +func (*NamedClusterRoleBinding) ProtoMessage() {} +func (*NamedClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{10} +} +func (m *NamedClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil 
+} +func (m *NamedClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedClusterRoleBinding.Merge(m, src) +} +func (m *NamedClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *NamedClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_NamedClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedClusterRoleBinding proto.InternalMessageInfo + +func (m *NamedRole) Reset() { *m = NamedRole{} } +func (*NamedRole) ProtoMessage() {} +func (*NamedRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{11} +} +func (m *NamedRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRole.Merge(m, src) +} +func (m *NamedRole) XXX_Size() int { + return m.Size() +} +func (m *NamedRole) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRole.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRole proto.InternalMessageInfo + +func (m *NamedRoleBinding) Reset() { *m = NamedRoleBinding{} } +func (*NamedRoleBinding) ProtoMessage() {} +func (*NamedRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{12} +} +func (m *NamedRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRoleBinding.Merge(m, src) +} +func (m *NamedRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *NamedRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRoleBinding proto.InternalMessageInfo + +func (m *OptionalNames) Reset() { *m = OptionalNames{} } +func (*OptionalNames) ProtoMessage() {} +func (*OptionalNames) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{13} +} +func (m *OptionalNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNames.Merge(m, src) +} +func (m *OptionalNames) XXX_Size() int { + return m.Size() +} +func (m *OptionalNames) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNames.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNames proto.InternalMessageInfo + +func (m *OptionalScopes) Reset() { *m = OptionalScopes{} } +func (*OptionalScopes) ProtoMessage() {} +func (*OptionalScopes) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{14} +} +func (m *OptionalScopes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalScopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalScopes) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalScopes.Merge(m, src) +} +func (m *OptionalScopes) XXX_Size() int { + return m.Size() +} +func (m 
*OptionalScopes) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalScopes.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalScopes proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{15} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *ResourceAccessReview) Reset() { *m = ResourceAccessReview{} } +func (*ResourceAccessReview) ProtoMessage() {} +func (*ResourceAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{16} +} +func (m *ResourceAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAccessReview.Merge(m, src) +} +func (m *ResourceAccessReview) XXX_Size() int { + return m.Size() +} +func (m *ResourceAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAccessReview proto.InternalMessageInfo + +func (m *ResourceAccessReviewResponse) Reset() { *m = ResourceAccessReviewResponse{} } +func (*ResourceAccessReviewResponse) ProtoMessage() {} +func (*ResourceAccessReviewResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{17} +} +func (m *ResourceAccessReviewResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAccessReviewResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAccessReviewResponse.Merge(m, src) +} +func (m *ResourceAccessReviewResponse) XXX_Size() int { + return m.Size() +} +func (m *ResourceAccessReviewResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAccessReviewResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAccessReviewResponse proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{18} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{19} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{20} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleBindingRestriction) Reset() { *m = RoleBindingRestriction{} } +func (*RoleBindingRestriction) ProtoMessage() {} +func (*RoleBindingRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{21} +} +func (m *RoleBindingRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingRestriction.Merge(m, src) +} +func (m *RoleBindingRestriction) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingRestriction proto.InternalMessageInfo + +func (m *RoleBindingRestrictionList) Reset() { *m = RoleBindingRestrictionList{} } +func (*RoleBindingRestrictionList) ProtoMessage() {} +func (*RoleBindingRestrictionList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{22} +} +func (m *RoleBindingRestrictionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingRestrictionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingRestrictionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingRestrictionList.Merge(m, src) +} +func (m *RoleBindingRestrictionList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingRestrictionList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingRestrictionList.DiscardUnknown(m) +} + 
+var xxx_messageInfo_RoleBindingRestrictionList proto.InternalMessageInfo + +func (m *RoleBindingRestrictionSpec) Reset() { *m = RoleBindingRestrictionSpec{} } +func (*RoleBindingRestrictionSpec) ProtoMessage() {} +func (*RoleBindingRestrictionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{23} +} +func (m *RoleBindingRestrictionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingRestrictionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingRestrictionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingRestrictionSpec.Merge(m, src) +} +func (m *RoleBindingRestrictionSpec) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingRestrictionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingRestrictionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingRestrictionSpec proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{24} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{25} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{26} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func 
(m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *ServiceAccountReference) Reset() { *m = ServiceAccountReference{} } +func (*ServiceAccountReference) ProtoMessage() {} +func (*ServiceAccountReference) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{27} +} +func (m *ServiceAccountReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountReference.Merge(m, src) +} +func (m *ServiceAccountReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountReference proto.InternalMessageInfo + +func (m *ServiceAccountRestriction) Reset() { *m = ServiceAccountRestriction{} } +func (*ServiceAccountRestriction) ProtoMessage() {} +func (*ServiceAccountRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{28} +} +func (m *ServiceAccountRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountRestriction.Merge(m, src) +} +func (m *ServiceAccountRestriction) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountRestriction proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{29} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo + +func (m *SubjectAccessReviewResponse) Reset() { *m = SubjectAccessReviewResponse{} } +func (*SubjectAccessReviewResponse) ProtoMessage() {} +func (*SubjectAccessReviewResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{30} +} +func (m *SubjectAccessReviewResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil 
{ + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewResponse.Merge(m, src) +} +func (m *SubjectAccessReviewResponse) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewResponse proto.InternalMessageInfo + +func (m *SubjectRulesReview) Reset() { *m = SubjectRulesReview{} } +func (*SubjectRulesReview) ProtoMessage() {} +func (*SubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{31} +} +func (m *SubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReview.Merge(m, src) +} +func (m *SubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReview proto.InternalMessageInfo + +func (m *SubjectRulesReviewSpec) Reset() { *m = SubjectRulesReviewSpec{} } +func (*SubjectRulesReviewSpec) ProtoMessage() {} +func (*SubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{32} +} +func (m *SubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewSpec.Merge(m, src) +} +func (m *SubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{33} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + +func (m *UserRestriction) Reset() { *m = UserRestriction{} } +func (*UserRestriction) ProtoMessage() {} +func (*UserRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{34} +} +func (m *UserRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *UserRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserRestriction.Merge(m, src) +} +func (m *UserRestriction) XXX_Size() int { + return m.Size() +} +func (m *UserRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_UserRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_UserRestriction proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Action)(nil), "github.com.openshift.api.authorization.v1.Action") + proto.RegisterType((*ClusterRole)(nil), "github.com.openshift.api.authorization.v1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleList") + proto.RegisterType((*GroupRestriction)(nil), "github.com.openshift.api.authorization.v1.GroupRestriction") + proto.RegisterType((*IsPersonalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.IsPersonalSubjectAccessReview") + proto.RegisterType((*LocalResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalResourceAccessReview") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NamedClusterRole)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRole") + proto.RegisterType((*NamedClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRoleBinding") + proto.RegisterType((*NamedRole)(nil), "github.com.openshift.api.authorization.v1.NamedRole") + proto.RegisterType((*NamedRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedRoleBinding") + proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.authorization.v1.OptionalNames") + proto.RegisterType((*OptionalScopes)(nil), "github.com.openshift.api.authorization.v1.OptionalScopes") + proto.RegisterType((*PolicyRule)(nil), "github.com.openshift.api.authorization.v1.PolicyRule") + proto.RegisterType((*ResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReview") + proto.RegisterType((*ResourceAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReviewResponse") + proto.RegisterType((*Role)(nil), "github.com.openshift.api.authorization.v1.Role") + proto.RegisterType((*RoleBinding)(nil), "github.com.openshift.api.authorization.v1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingList") + proto.RegisterType((*RoleBindingRestriction)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestriction") + proto.RegisterType((*RoleBindingRestrictionList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionList") + proto.RegisterType((*RoleBindingRestrictionSpec)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionSpec") + proto.RegisterType((*RoleList)(nil), "github.com.openshift.api.authorization.v1.RoleList") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), 
"github.com.openshift.api.authorization.v1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*ServiceAccountReference)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountReference") + proto.RegisterType((*ServiceAccountRestriction)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountRestriction") + proto.RegisterType((*SubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReviewResponse") + proto.RegisterType((*SubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReview") + proto.RegisterType((*SubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewSpec") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewStatus") + proto.RegisterType((*UserRestriction)(nil), "github.com.openshift.api.authorization.v1.UserRestriction") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/authorization/v1/generated.proto", fileDescriptor_39b89822f939ca46) +} + +var fileDescriptor_39b89822f939ca46 = []byte{ + // 1841 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0xcd, 0x6f, 0x1b, 0x4b, + 0x3d, 0x63, 0x3b, 0x8e, 0xfd, 0x73, 0x13, 0xe7, 0x4d, 0xf3, 0xd2, 0x6d, 0xa0, 0xb6, 0xb5, 0x20, + 0x48, 0x05, 0x6f, 0x4d, 0x02, 0x94, 0xb6, 0x4f, 0xe8, 0xc9, 0xee, 0x8b, 0xaa, 0x48, 0xa5, 0xc9, + 0x9b, 0xf0, 0x9e, 0xaa, 0xf2, 0x21, 0xd6, 0x9b, 0x89, 0xbd, 0x64, 0xbd, 0x6b, 0xed, 0xac, 0x53, + 0x0a, 0x42, 0x2a, 0x48, 0x1c, 0xb8, 0x20, 0x4e, 0x88, 0x23, 0x88, 0x3f, 0x00, 0x71, 0x41, 0x02, + 0x09, 0x4e, 0x1c, 0x82, 0x84, 0x50, 0x25, 0x2e, 0x15, 0x42, 0x86, 0xba, 0x9c, 0x38, 0x72, 0xe1, + 0x8a, 0x66, 0x76, 0xd6, 0xfb, 0x61, 0x5b, 0xf1, 0x26, 0x24, 0xbc, 0x56, 0xbd, 0x79, 0xe7, 0xf7, + 0xfd, 0x9b, 0xdf, 0xe7, 0x24, 0x70, 0xab, 0x6d, 0x7a, 0x9d, 0x7e, 0x4b, 0x33, 0x9c, 0x6e, 0xdd, + 0xe9, 0x51, 0x9b, 0x75, 0xcc, 0x03, 0xaf, 0xae, 0xf7, 0xcc, 0xba, 0xde, 0xf7, 0x3a, 0x8e, 0x6b, + 0x7e, 0x5b, 0xf7, 0x4c, 0xc7, 0xae, 0x1f, 0x6d, 0xd4, 0xdb, 0xd4, 0xa6, 0xae, 0xee, 0xd1, 0x7d, + 0xad, 0xe7, 0x3a, 0x9e, 0x83, 0xaf, 0x87, 0xa4, 0xda, 0x88, 0x54, 0xd3, 0x7b, 0xa6, 0x16, 0x23, + 0xd5, 0x8e, 0x36, 0xd6, 0xde, 0x8a, 0x48, 0x69, 0x3b, 0x6d, 0xa7, 0x2e, 0x38, 0xb4, 0xfa, 0x07, + 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xe7, 0x35, 0xf5, 0xf0, 0x26, 0xd3, 0x4c, 0x47, 0xa8, 0x61, + 0x38, 0x2e, 0x9d, 0x20, 0x3d, 0x86, 0xe3, 0xb6, 0x74, 0x63, 0x12, 0xce, 0xe7, 0x42, 0x9c, 0xae, + 0x6e, 0x74, 0x4c, 0x9b, 0xba, 0x8f, 0xeb, 0xbd, 0xc3, 0x36, 0x3f, 0x60, 0xf5, 0x2e, 0xf5, 0xf4, + 0x49, 0x54, 0xf5, 0x69, 0x54, 0x6e, 0xdf, 0xf6, 0xcc, 0x2e, 0x1d, 0x23, 0xb8, 0x71, 0x12, 0x01, + 0x33, 0x3a, 0xb4, 0xab, 0x27, 0xe9, 0xd4, 0xef, 0xe5, 0x20, 0xdf, 0x30, 0xb8, 0x8f, 0x70, 0x1d, + 0x8a, 0xb6, 0xde, 0xa5, 0xac, 0xa7, 0x1b, 0x54, 0x41, 0x35, 0xb4, 0x5e, 0x6c, 0xbe, 0x71, 0x3c, + 0xa8, 0xce, 0x0d, 0x07, 0xd5, 0xe2, 0xfd, 0x00, 0x40, 0x42, 0x1c, 0x5c, 0x83, 0xdc, 0x11, 0x75, + 0x5b, 0x4a, 0x46, 0xe0, 0x5e, 0x92, 0xb8, 0xb9, 0x0f, 0xa8, 0xdb, 0x22, 0x02, 0x82, 0x6f, 0xc1, + 0xb2, 0x4b, 0x99, 0xd3, 0x77, 0x0d, 0xda, 0xd8, 0xdd, 0xbe, 0xeb, 0x3a, 0xfd, 0x9e, 0x92, 0x15, + 0xd8, 0x8b, 0x12, 0x7b, 0x5e, 0x1c, 0x92, 0x31, 0x34, 0xfc, 0x0e, 0xe0, 0xc8, 0xd9, 0x07, 0xd4, + 0x65, 0xa6, 0x63, 0x2b, 0x39, 0x41, 0x5c, 0x96, 0xc4, 0x0b, 0xf2, 0x98, 0x4c, 0x40, 0xc5, 0x9f, + 0x86, 0x42, 0x70, 0xaa, 
0xcc, 0x0b, 0xb2, 0x65, 0x49, 0x56, 0x20, 0xf2, 0x9c, 0x8c, 0x30, 0xf0, + 0x4d, 0xb8, 0x14, 0xfc, 0xe6, 0xb6, 0x2a, 0x79, 0x41, 0xb1, 0x22, 0x29, 0x2e, 0x91, 0x08, 0x8c, + 0xc4, 0x30, 0xb9, 0x17, 0x7a, 0xba, 0xd7, 0x51, 0x0a, 0x71, 0x2f, 0xec, 0xea, 0x5e, 0x87, 0x08, + 0x08, 0x7e, 0x17, 0x96, 0x4d, 0x76, 0xdf, 0xb1, 0x03, 0x26, 0xef, 0x93, 0x7b, 0x4a, 0xb1, 0x86, + 0xd6, 0x0b, 0x4d, 0x45, 0x62, 0x2f, 0x6f, 0x27, 0xe0, 0x64, 0x8c, 0x02, 0x3f, 0x80, 0x05, 0xc3, + 0xb1, 0x3d, 0x6a, 0x7b, 0xca, 0x42, 0x0d, 0xad, 0x97, 0x36, 0xdf, 0xd2, 0xfc, 0x3b, 0xd7, 0xa2, + 0x77, 0xae, 0xf5, 0x0e, 0xdb, 0x9a, 0xbc, 0x73, 0x8d, 0xe8, 0x8f, 0xb6, 0xbe, 0xe5, 0x51, 0x9b, + 0xfb, 0x23, 0x74, 0xda, 0x1d, 0x9f, 0x0b, 0x09, 0xd8, 0xa9, 0xbf, 0xcc, 0x40, 0xe9, 0x8e, 0xd5, + 0x67, 0x1e, 0x75, 0x89, 0x63, 0x51, 0xfc, 0x0d, 0x28, 0xf0, 0xb8, 0xdc, 0xd7, 0x3d, 0x5d, 0xc4, + 0x41, 0x69, 0xf3, 0x33, 0x53, 0x45, 0xf1, 0x28, 0xd6, 0x38, 0xb6, 0x76, 0xb4, 0xa1, 0xed, 0xb4, + 0xbe, 0x49, 0x0d, 0xef, 0x4b, 0xd4, 0xd3, 0x9b, 0x58, 0x4a, 0x83, 0xf0, 0x8c, 0x8c, 0xb8, 0xe2, + 0x87, 0x30, 0xef, 0xf6, 0x2d, 0xca, 0x94, 0x4c, 0x2d, 0xbb, 0x5e, 0xda, 0xfc, 0xbc, 0x36, 0x73, + 0x1a, 0x6b, 0xbb, 0x8e, 0x65, 0x1a, 0x8f, 0x49, 0xdf, 0xa2, 0x61, 0x0c, 0xf1, 0x2f, 0x46, 0x7c, + 0x96, 0xb8, 0x05, 0x65, 0xbd, 0xdd, 0x76, 0x69, 0x5b, 0x90, 0x70, 0x90, 0x08, 0xb9, 0xd2, 0xe6, + 0xc7, 0x22, 0x46, 0x68, 0x3c, 0x5d, 0x39, 0xbb, 0x46, 0x1c, 0xb5, 0x79, 0x79, 0x38, 0xa8, 0x96, + 0x13, 0x87, 0x24, 0xc9, 0x50, 0xfd, 0x57, 0x16, 0x70, 0xc4, 0x63, 0x4d, 0xd3, 0xde, 0x37, 0xed, + 0xf6, 0x05, 0x38, 0x8e, 0x42, 0xb1, 0xcf, 0xa8, 0x2b, 0xd2, 0x51, 0xe4, 0x5d, 0x69, 0xf3, 0x66, + 0x0a, 0xe7, 0xed, 0xf4, 0xf8, 0x2f, 0xdd, 0x12, 0xf4, 0xcd, 0x45, 0x9e, 0xd9, 0xef, 0x07, 0xec, + 0x48, 0xc8, 0x19, 0x77, 0x00, 0xda, 0x3c, 0x0b, 0x7d, 0x39, 0xd9, 0x33, 0xca, 0x59, 0xe2, 0xe6, + 0xdc, 0x1d, 0xf1, 0x23, 0x11, 0xde, 0xf8, 0x3d, 0x28, 0xb0, 0xbe, 0xb0, 0x94, 0x29, 0x39, 0x11, + 0x0c, 0xb1, 0x6b, 0xe2, 0x95, 0x37, 0x74, 0x10, 0xa1, 0x07, 0xd4, 0xa5, 0xb6, 0x41, 0xc3, 0x54, + 0xde, 0x93, 0xc4, 0x64, 0xc4, 0x06, 0xdf, 0x87, 0x05, 0xd7, 0xb1, 0x28, 0xa1, 0x07, 0x22, 0xef, + 0x67, 0xe4, 0x38, 0x4a, 0x0f, 0xe2, 0xd3, 0x92, 0x80, 0x89, 0xfa, 0x57, 0x04, 0xab, 0xe3, 0x97, + 0x7d, 0xcf, 0x64, 0x1e, 0xfe, 0xea, 0xd8, 0x85, 0x6b, 0xb3, 0x5d, 0x38, 0xa7, 0x16, 0xd7, 0x3d, + 0x32, 0x24, 0x38, 0x89, 0x5c, 0x76, 0x0b, 0xe6, 0x4d, 0x8f, 0x76, 0x83, 0x2c, 0xf9, 0x62, 0x8a, + 0x0b, 0x18, 0xd7, 0x37, 0xcc, 0x96, 0x6d, 0xce, 0x93, 0xf8, 0xac, 0xd5, 0x3f, 0x21, 0x28, 0x47, + 0x90, 0x2f, 0xc0, 0xaa, 0xaf, 0xc4, 0xad, 0xba, 0x71, 0x4a, 0xab, 0x26, 0x9b, 0xf3, 0x13, 0x04, + 0xcb, 0x7e, 0x47, 0xa1, 0xcc, 0x73, 0x4d, 0xbf, 0xb1, 0xa9, 0x90, 0x17, 0x11, 0xc7, 0x14, 0x54, + 0xcb, 0xae, 0x17, 0x9b, 0x30, 0x1c, 0x54, 0xf3, 0x02, 0x8b, 0x11, 0x09, 0xc1, 0x5f, 0x87, 0xbc, + 0xa5, 0xb7, 0xa8, 0x15, 0xa8, 0xf5, 0xd9, 0x19, 0x2d, 0xe6, 0x34, 0x7b, 0xd4, 0xa2, 0x86, 0xe7, + 0xb8, 0x61, 0xbb, 0x0c, 0x4e, 0x18, 0x91, 0x5c, 0xd5, 0x2a, 0x5c, 0xdb, 0x66, 0xbb, 0xd4, 0x65, + 0x3c, 0x2d, 0x64, 0xd0, 0x36, 0x0c, 0x83, 0x32, 0x46, 0xe8, 0x91, 0x49, 0x1f, 0xa9, 0x7f, 0x46, + 0x70, 0xf5, 0x9e, 0x63, 0xe8, 0x56, 0x50, 0xf3, 0xa3, 0xd0, 0x58, 0x65, 0xc9, 0x9c, 0x4b, 0x65, + 0xd9, 0x09, 0xe6, 0x00, 0x79, 0xe5, 0x1b, 0x29, 0xee, 0xc5, 0x27, 0x6c, 0xe6, 0xb8, 0x00, 0x22, + 0xd9, 0xa8, 0xff, 0xc9, 0x80, 0x22, 0x0c, 0x9a, 0x60, 0x6d, 0xcc, 0x9e, 0xf9, 0x97, 0xc2, 0x1e, + 0xde, 0xe7, 0x79, 0x81, 0x4c, 0x4e, 0x3b, 0xbc, 0x7e, 0x12, 0x01, 0xc1, 0x9f, 0x1c, 0xc5, 0x59, + 0x56, 0xc4, 0x59, 0x79, 0x38, 0xa8, 0x96, 0xfc, 
0x38, 0xdb, 0xb3, 0x4c, 0x83, 0x8e, 0x82, 0xed, + 0x6b, 0x90, 0x67, 0x86, 0xd3, 0xa3, 0x4c, 0xcc, 0x33, 0xa5, 0xcd, 0x5b, 0xa7, 0x28, 0xad, 0x7b, + 0x82, 0x81, 0x1f, 0xcb, 0xfe, 0x6f, 0x22, 0x99, 0xaa, 0x3f, 0x42, 0xb0, 0xcc, 0xab, 0xeb, 0x7e, + 0xb4, 0xa9, 0xd7, 0x20, 0xc7, 0x27, 0x37, 0x39, 0xd8, 0x8d, 0xd4, 0x17, 0x03, 0x8d, 0x80, 0xe0, + 0x07, 0x90, 0xe3, 0x25, 0x4f, 0xc6, 0xd7, 0x69, 0xf3, 0x72, 0xc4, 0x59, 0xd4, 0x51, 0xc1, 0x51, + 0xfd, 0x15, 0x82, 0x2b, 0x49, 0x85, 0x82, 0x9e, 0x79, 0xb2, 0x5e, 0x1e, 0x94, 0xdc, 0x90, 0x40, + 0xaa, 0x77, 0xc6, 0x62, 0x78, 0x59, 0xca, 0x29, 0x45, 0x0e, 0x49, 0x54, 0x8c, 0xfa, 0x04, 0x81, + 0x98, 0x7a, 0xf7, 0x67, 0xf4, 0xde, 0x7b, 0x31, 0xef, 0xd5, 0x53, 0xa8, 0x37, 0xd5, 0x6d, 0xbf, + 0x08, 0xee, 0x31, 0x9d, 0xbf, 0xba, 0x93, 0xfc, 0x75, 0x23, 0xad, 0x42, 0x33, 0x3b, 0xea, 0x36, + 0x2c, 0xc6, 0xda, 0x3d, 0xae, 0x06, 0x05, 0xde, 0xaf, 0xb6, 0xc5, 0x64, 0x91, 0xbe, 0x5d, 0xf8, + 0xe9, 0xcf, 0xaa, 0x73, 0x4f, 0xfe, 0x56, 0x9b, 0x53, 0xdf, 0x86, 0xa5, 0x78, 0x3c, 0xa7, 0x21, + 0xfe, 0x61, 0x16, 0x20, 0x9c, 0x06, 0x39, 0x25, 0xdf, 0x39, 0x62, 0x94, 0x7c, 0x15, 0x61, 0xc4, + 0x3f, 0xc7, 0xdf, 0x47, 0xf0, 0xa6, 0xee, 0x79, 0xae, 0xd9, 0xea, 0x7b, 0x34, 0xd2, 0x1f, 0x82, + 0x41, 0x2a, 0xe5, 0x3c, 0x7d, 0x4d, 0x7a, 0xe6, 0xcd, 0xc6, 0x24, 0x9e, 0x64, 0xb2, 0x28, 0xfc, + 0x29, 0x28, 0xea, 0x3d, 0xf3, 0x6e, 0xb4, 0x4c, 0x88, 0x31, 0x2c, 0xd8, 0x7b, 0x18, 0x09, 0xe1, + 0x1c, 0x39, 0x58, 0x35, 0xfc, 0xe9, 0x48, 0x22, 0x07, 0x2d, 0x82, 0x91, 0x10, 0x8e, 0xbf, 0x00, + 0x8b, 0xd1, 0xbd, 0x84, 0x29, 0xf3, 0x82, 0xe0, 0x8d, 0xe1, 0xa0, 0xba, 0x18, 0x5d, 0x5f, 0x18, + 0x89, 0xe3, 0xe1, 0x26, 0x94, 0xed, 0xd8, 0xaa, 0xc1, 0x94, 0xbc, 0x20, 0x55, 0x86, 0x83, 0xea, + 0x4a, 0x7c, 0x0b, 0x91, 0x85, 0x2c, 0x49, 0xa0, 0xfe, 0x11, 0xc1, 0xca, 0xab, 0xd2, 0xb8, 0xfe, + 0x8e, 0xe0, 0xa3, 0x93, 0x6c, 0x21, 0x94, 0xf5, 0x1c, 0x9b, 0xd1, 0xf4, 0x8b, 0xf2, 0xc7, 0x61, + 0x9e, 0x37, 0x08, 0x7f, 0xb6, 0x28, 0xfa, 0xf3, 0x30, 0xef, 0x1b, 0xd2, 0x9b, 0x3e, 0x70, 0xf6, + 0xf6, 0xf1, 0x0e, 0x2c, 0xd1, 0x23, 0xdd, 0xea, 0x73, 0x6d, 0xb7, 0x5c, 0xd7, 0x71, 0xe5, 0x5a, + 0x7c, 0x45, 0x2a, 0x51, 0xde, 0xe2, 0x50, 0x7d, 0x04, 0x26, 0x09, 0x74, 0xf5, 0x0f, 0x08, 0x72, + 0x2f, 0xff, 0xa6, 0xa7, 0xbe, 0xc8, 0x42, 0xe9, 0xf5, 0xfa, 0xf5, 0xaa, 0xaf, 0x5f, 0x7c, 0x43, + 0xb9, 0xd8, 0xbd, 0xeb, 0x0c, 0x1b, 0xca, 0xc9, 0x0b, 0xd7, 0x0b, 0x04, 0xab, 0xd1, 0x5e, 0x1a, + 0xd9, 0x53, 0xce, 0x3f, 0x7e, 0xdb, 0x90, 0x63, 0x3d, 0x6a, 0xc8, 0xd0, 0xdd, 0x3a, 0x9d, 0x61, + 0x11, 0x95, 0xf7, 0x7a, 0xd4, 0x08, 0x67, 0x10, 0xfe, 0x45, 0x84, 0x00, 0x75, 0x88, 0x60, 0x6d, + 0x32, 0xc9, 0x05, 0xdc, 0xdf, 0x41, 0xfc, 0xfe, 0x1a, 0x67, 0x36, 0x73, 0xca, 0x55, 0xfe, 0x36, + 0x3b, 0xcd, 0x48, 0xee, 0x09, 0xfc, 0x18, 0xca, 0x3c, 0xa5, 0xdd, 0xf0, 0x58, 0xda, 0x7a, 0x3b, + 0x85, 0x42, 0x62, 0xbd, 0x88, 0x68, 0x22, 0xde, 0xa7, 0x12, 0x87, 0x24, 0x29, 0x07, 0x7f, 0x17, + 0x96, 0x45, 0x92, 0x47, 0x65, 0xfb, 0x77, 0xfe, 0x76, 0x0a, 0xd9, 0xc9, 0x45, 0xba, 0xb9, 0x32, + 0x1c, 0x54, 0xc7, 0xd6, 0x6b, 0x32, 0x26, 0x0a, 0xff, 0x1c, 0xc1, 0x55, 0x46, 0xdd, 0x23, 0xd3, + 0xa0, 0xba, 0x61, 0x38, 0x7d, 0xdb, 0x8b, 0x2a, 0xe2, 0xd7, 0xb3, 0x77, 0x53, 0x28, 0xb2, 0xe7, + 0xf3, 0x6a, 0xf8, 0xbc, 0xa2, 0x1a, 0x5d, 0x1b, 0x0e, 0xaa, 0x57, 0xa7, 0x82, 0xc9, 0x74, 0x2d, + 0xd4, 0xdf, 0x23, 0x28, 0x5c, 0xd0, 0x8b, 0xc7, 0x97, 0xe3, 0xf1, 0x98, 0x7a, 0x37, 0x98, 0x1c, + 0x7d, 0x4f, 0x33, 0xb0, 0xba, 0x47, 0xad, 0x03, 0x59, 0x82, 0xfd, 0xce, 0x38, 0x3e, 0x74, 0x65, + 0xcf, 0xb5, 0x90, 0xa0, 0xd4, 0x85, 0x64, 0xb2, 0xca, 0xd3, 0x0a, 0x09, 
0x3e, 0x84, 0x3c, 0xf3, + 0x74, 0xaf, 0x1f, 0xb4, 0xdb, 0x3b, 0x69, 0x44, 0x8d, 0x8b, 0x11, 0xac, 0x9a, 0x4b, 0x52, 0x50, + 0xde, 0xff, 0x26, 0x52, 0x84, 0xfa, 0x1d, 0x58, 0x9b, 0xae, 0x5e, 0x64, 0x6b, 0x47, 0xe7, 0xb1, + 0xb5, 0x5b, 0x70, 0x25, 0x19, 0xc8, 0xb2, 0x39, 0xce, 0xb0, 0xf3, 0xc5, 0x46, 0xd2, 0xcc, 0xc9, + 0x23, 0xa9, 0xfa, 0x17, 0x04, 0xd3, 0xf3, 0x06, 0xff, 0x00, 0x41, 0x39, 0x9e, 0x3a, 0xfe, 0x5a, + 0x55, 0xda, 0x6c, 0x9e, 0x21, 0x6d, 0x83, 0x5e, 0x3f, 0x9a, 0x53, 0xe3, 0x08, 0x8c, 0x24, 0x65, + 0x62, 0x0d, 0x60, 0xa4, 0x72, 0x6c, 0x7a, 0x1e, 0xd9, 0xc4, 0x48, 0x04, 0x43, 0xfd, 0x77, 0x06, + 0x2e, 0xbf, 0x7e, 0x6e, 0xba, 0xe0, 0xe7, 0xa6, 0x7f, 0x22, 0xf8, 0xc8, 0x04, 0xa7, 0x9f, 0x7e, + 0x5d, 0xba, 0x0e, 0x0b, 0xba, 0x65, 0x39, 0x8f, 0xe8, 0xbe, 0xb0, 0xbe, 0x10, 0x0e, 0x87, 0x0d, + 0xff, 0x98, 0x04, 0x70, 0xfc, 0x09, 0xc8, 0xbb, 0x54, 0x67, 0xb2, 0xab, 0x14, 0xc3, 0xcc, 0x26, + 0xe2, 0x94, 0x48, 0x28, 0x6e, 0x40, 0x99, 0xc6, 0x97, 0xa2, 0x93, 0x76, 0xa6, 0x24, 0xbe, 0x7a, + 0x9c, 0x01, 0xfc, 0x7f, 0xa9, 0xb5, 0x46, 0xac, 0xd6, 0x36, 0xce, 0x56, 0x00, 0x3f, 0x14, 0x75, + 0xf6, 0x77, 0x08, 0x56, 0xa7, 0x14, 0xd9, 0x20, 0xec, 0xd1, 0xd4, 0xb0, 0x0f, 0x5f, 0xf3, 0x33, + 0x53, 0x5f, 0xf3, 0xc3, 0x88, 0xcf, 0x9e, 0x47, 0xc4, 0xff, 0x06, 0x81, 0x32, 0xcd, 0xe8, 0x70, + 0xe3, 0x45, 0xff, 0xfb, 0xbf, 0x6d, 0x4e, 0x08, 0xe3, 0x4c, 0xca, 0x30, 0xfe, 0x35, 0x82, 0xe4, + 0xfc, 0x88, 0xab, 0xc1, 0xfb, 0x44, 0xe4, 0xe9, 0x4c, 0xbc, 0x4f, 0x04, 0x4f, 0x13, 0xb3, 0xf8, + 0x3c, 0xfc, 0x0b, 0x4a, 0xf6, 0x3c, 0xfe, 0x82, 0xd2, 0xdc, 0x39, 0x7e, 0x5e, 0x99, 0x7b, 0xfa, + 0xbc, 0x32, 0xf7, 0xec, 0x79, 0x65, 0xee, 0xc9, 0xb0, 0x82, 0x8e, 0x87, 0x15, 0xf4, 0x74, 0x58, + 0x41, 0xcf, 0x86, 0x15, 0xf4, 0x8f, 0x61, 0x05, 0xfd, 0xf8, 0x45, 0x65, 0xee, 0xe1, 0xf5, 0x99, + 0xff, 0x97, 0xe4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x1c, 0xfa, 0x7f, 0x77, 0x22, 0x00, + 0x00, +} + +func (m *Action) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Action) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Action) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.IsNonResourceURL { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x42 + { + size, err := m.Content.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.GroupNames != nil { + { + size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UserNames != nil { + { + size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IsPersonalSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsPersonalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IsPersonalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *LocalResourceAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalResourceAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.Scopes != nil { + { + size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.GroupsSlice) > 0 { + for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.GroupsSlice[iNdEx]) + copy(dAtA[i:], m.GroupsSlice[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamedClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedClusterRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Role.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamedClusterRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamedRole) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Role.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamedRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedRoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m OptionalNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m OptionalScopes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalScopes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalScopes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLsSlice) > 0 { + for iNdEx := 
len(m.NonResourceURLsSlice) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLsSlice[iNdEx]) + copy(dAtA[i:], m.NonResourceURLsSlice[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLsSlice[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.AttributeRestrictions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceAccessReviewResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + if len(m.GroupsSlice) > 0 { + for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.GroupsSlice[iNdEx]) + copy(dAtA[i:], m.GroupsSlice[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.UsersSlice) > 0 { + for iNdEx := len(m.UsersSlice) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UsersSlice[iNdEx]) + copy(dAtA[i:], m.UsersSlice[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.UsersSlice[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.GroupNames != nil { + { + size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UserNames != nil { + { + size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingRestrictionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingRestrictionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingRestrictionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingRestrictionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingRestrictionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingRestrictionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccountRestriction != nil { + { + size, err := m.ServiceAccountRestriction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GroupRestriction != nil { + { + size, err := m.GroupRestriction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.UserRestriction != nil { + { + size, err := m.UserRestriction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scopes != nil { + { + size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ServiceAccountRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ServiceAccounts) > 0 { + for iNdEx := len(m.ServiceAccounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServiceAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.Scopes != nil { + { + size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.GroupsSlice) > 0 { + for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.GroupsSlice[iNdEx]) + copy(dAtA[i:], m.GroupsSlice[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Namespace) + copy(dAtA[i:], 
m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectRulesReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scopes != nil { + { + size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x12 + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UserRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*UserRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Users) > 0 {
+		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Users[iNdEx])
+			copy(dAtA[i:], m.Users[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Action) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Verb)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Content.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *ClusterRole) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.AggregationRule != nil {
+		l = m.AggregationRule.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ClusterRoleBinding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.UserNames != nil {
+		l = m.UserNames.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GroupNames != nil {
+		l = m.GroupNames.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Subjects) > 0 {
+		for _, e := range m.Subjects {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.RoleRef.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ClusterRoleBindingList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ClusterRoleList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *GroupRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Groups) > 0 {
+		for _, s := range
m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IsPersonalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *LocalResourceAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.GroupsSlice) > 0 { + for _, s := range m.GroupsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scopes != nil { + l = m.Scopes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedClusterRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Role.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.RoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Role.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.RoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m OptionalNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalScopes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AttributeRestrictions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLsSlice) > 0 { + for _, s := range m.NonResourceURLsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAccessReviewResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UsersSlice) > 0 { + for _, s := range m.UsersSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.GroupsSlice) > 0 { + for _, s := range m.GroupsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.UserNames != nil { + l = m.UserNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupNames != nil { + l = m.GroupNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBindingRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingRestrictionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBindingRestrictionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UserRestriction != nil { + l = m.UserRestriction.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupRestriction != nil { + l = m.GroupRestriction.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountRestriction != nil { + l = m.ServiceAccountRestriction.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Scopes != nil { + l = m.Scopes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceAccountReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) 
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ServiceAccountRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ServiceAccounts) > 0 {
+		for _, e := range m.ServiceAccounts {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Namespaces) > 0 {
+		for _, s := range m.Namespaces {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SubjectAccessReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Action.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.GroupsSlice) > 0 {
+		for _, s := range m.GroupsSlice {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Scopes != nil {
+		l = m.Scopes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SubjectAccessReviewResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.EvaluationError)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SubjectRulesReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SubjectRulesReviewSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Scopes != nil {
+		l = m.Scopes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *SubjectRulesReviewStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.EvaluationError)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *UserRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Users) > 0 {
+		for _, s := range m.Users {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Selectors) > 0 {
+		for _, e := range m.Selectors {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Action) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Action{`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Verb:` + fmt.Sprintf("%v", this.Verb) + `,`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
+		`Content:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Content), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+
`IsNonResourceURL:` + fmt.Sprintf("%v", this.IsNonResourceURL) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "v11.AggregationRule", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]ObjectReference{" + for _, f := range this.Subjects { + repeatedStringForSubjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`, + `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *GroupRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]LabelSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&GroupRestriction{`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `}`, + }, "") + return s +} +func (this *IsPersonalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IsPersonalSubjectAccessReview{`, + `}`, + }, 
"") + return s +} +func (this *LocalResourceAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalResourceAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedClusterRole{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Role:` + strings.Replace(strings.Replace(this.Role.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedClusterRoleBinding{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRole{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Role:` + strings.Replace(strings.Replace(this.Role.String(), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRoleBinding{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `AttributeRestrictions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AttributeRestrictions), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLsSlice:` + fmt.Sprintf("%v", this.NonResourceURLsSlice) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ResourceAccessReviewResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAccessReviewResponse{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `UsersSlice:` + fmt.Sprintf("%v", this.UsersSlice) + `,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]ObjectReference{" + for _, f := range this.Subjects { + repeatedStringForSubjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`, + `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingRestriction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RoleBindingRestrictionSpec", "RoleBindingRestrictionSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestrictionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBindingRestriction{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBindingRestriction", "RoleBindingRestriction", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingRestrictionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + 
}, "") + return s +} +func (this *RoleBindingRestrictionSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingRestrictionSpec{`, + `UserRestriction:` + strings.Replace(this.UserRestriction.String(), "UserRestriction", "UserRestriction", 1) + `,`, + `GroupRestriction:` + strings.Replace(this.GroupRestriction.String(), "GroupRestriction", "GroupRestriction", 1) + `,`, + `ServiceAccountRestriction:` + strings.Replace(this.ServiceAccountRestriction.String(), "ServiceAccountRestriction", "ServiceAccountRestriction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForServiceAccounts := "[]ServiceAccountReference{" + for _, f := range this.ServiceAccounts { + repeatedStringForServiceAccounts += strings.Replace(strings.Replace(f.String(), "ServiceAccountReference", "ServiceAccountReference", 1), `&`, ``, 1) + "," + } + repeatedStringForServiceAccounts += "}" + s := strings.Join([]string{`&ServiceAccountRestriction{`, + `ServiceAccounts:` + repeatedStringForServiceAccounts + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", 
"v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewResponse{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectRulesReviewSpec", "SubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReviewSpec{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `Rules:` + repeatedStringForRules + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *UserRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]LabelSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&UserRestriction{`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Action) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Action: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Action: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Content.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNonResourceURL", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNonResourceURL = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &v11.AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserNames == nil { + m.UserNames = OptionalNames{} + } + if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupNames == nil { + m.GroupNames = OptionalNames{} + } + if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, v12.ObjectReference{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, v1.LabelSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsPersonalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsPersonalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsPersonalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalResourceAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalResourceAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } 
+ return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 
5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNames) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalScopes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalScopes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalScopes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AttributeRestrictions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLsSlice = append(m.NonResourceURLsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAccessReviewResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAccessReviewResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsersSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UsersSlice = append(m.UsersSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserNames == nil { + m.UserNames = OptionalNames{} + } + if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupNames == nil { + m.GroupNames = OptionalNames{} + } + if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, v12.ObjectReference{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestrictionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestrictionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestrictionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBindingRestriction{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestrictionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestrictionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestrictionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserRestriction == nil { + m.UserRestriction = &UserRestriction{} + } + if err := m.UserRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupRestriction == nil { + m.GroupRestriction = &GroupRestriction{} + } + if err := m.GroupRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountRestriction == nil { + m.ServiceAccountRestriction = &ServiceAccountRestriction{} + } + if err := m.ServiceAccountRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
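+ // Defensive check (a descriptive note, not generated output): if the cursor
+ // ever advanced past the end of the buffer, a length prefix promised more
+ // data than the message carries, so the input is reported as truncated.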
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccounts = append(m.ServiceAccounts, ServiceAccountReference{}) + if err := m.ServiceAccounts[len(m.ServiceAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
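+ // Fields with unrecognized numbers are skipped rather than rejected,
+ // which keeps this decoder forward compatible with newer revisions of
+ // the API that add fields.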
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
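+ // Each byte carries seven payload bits; a clear high bit (b < 0x80)
+ // marks the final byte of the base-128 varint holding the field tag.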
} + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, v1.LabelSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto new file mode 100644 index 0000000000000..f7d7b772a7fe4 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -0,0 +1,586 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package github.com.openshift.api.authorization.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/rbac/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/authorization/v1"; + +// Action describes a request to the API server +message Action { + // namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + optional string namespace = 1; + + // verb is one of: get, list, watch, create, update, delete + optional string verb = 2; + + // Group is the API group of the resource + // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined + optional string resourceAPIGroup = 3; + + // Version is the API version of the resource + // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined + optional string resourceAPIVersion = 4; + + // resource is one of the existing resource types + optional string resource = 5; + + // resourceName is the name of the resource being requested for a "get" or deleted for a "delete" + optional string resourceName = 6; + + // path is the path of a non resource URL + optional string path = 8; + + // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + optional bool isNonResourceURL = 9; + + // content is the actual content of the request for create and update + // +kubebuilder:pruning:PreserveUnknownFields + optional .k8s.io.apimachinery.pkg.runtime.RawExtension content = 7; +} + +// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRole { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; + + // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + optional .k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRoleBinding { + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // userNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames userNames = 2; + + // groupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames groupNames = 3; + + // subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + repeated .k8s.io.api.core.v1.ObjectReference subjects = 4; + + // roleRef can only reference the current namespace and the global namespace. + // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + optional .k8s.io.api.core.v1.ObjectReference roleRef = 5; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRoleBindingList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRoleList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// GroupRestriction matches a group either by a string match on the group name +// or a label selector applied to group labels. +message GroupRestriction { + // groups is a list of groups used to match against an individual user's + // groups. If the user is a member of one of the whitelisted groups, the user + // is allowed to be bound to a role. + // +nullable + repeated string groups = 1; + + // Selectors specifies a list of label selectors over group labels. 
+ // +nullable + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 2; +} + +// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message IsPersonalSubjectAccessReview { +} + +// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message LocalResourceAccessReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 2; + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + optional Action Action = 1; +} + +// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message LocalSubjectAccessReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 5; + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + optional Action Action = 1; + + // user is optional. If both User and Groups are empty, the current authenticated user is used. + optional string user = 2; + + // groups is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + repeated string groups = 3; + + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. 
+ // +k8s:conversion-gen=false + optional OptionalScopes scopes = 4; +} + +// NamedClusterRole relates a name with a cluster role +message NamedClusterRole { + // name is the name of the cluster role + optional string name = 1; + + // role is the cluster role being named + optional ClusterRole role = 2; +} + +// NamedClusterRoleBinding relates a name with a cluster role binding +message NamedClusterRoleBinding { + // name is the name of the cluster role binding + optional string name = 1; + + // roleBinding is the cluster role binding being named + optional ClusterRoleBinding roleBinding = 2; +} + +// NamedRole relates a Role with a name +message NamedRole { + // name is the name of the role + optional string name = 1; + + // role is the role being named + optional Role role = 2; +} + +// NamedRoleBinding relates a role binding with a name +message NamedRoleBinding { + // name is the name of the role binding + optional string name = 1; + + // roleBinding is the role binding being named + optional RoleBinding roleBinding = 2; +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNames { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// OptionalScopes is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalScopes { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + // +kubebuilder:pruning:PreserveUnknownFields + optional .k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2; + + // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request + // will be allowed + // +optional + // +nullable + repeated string apiGroups = 3; + + // resources is a list of resources this rule applies to. ResourceAll represents all resources. + repeated string resources = 4; + + // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + repeated string resourceNames = 5; + + // NonResourceURLsSlice is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different. 
+ repeated string nonResourceURLs = 6; +} + +// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the +// action specified by spec +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ResourceAccessReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 2; + + // Action describes the action being tested. + optional Action Action = 1; +} + +// ResourceAccessReviewResponse describes who can perform the action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ResourceAccessReviewResponse { + // namespace is the namespace used for the access review + optional string namespace = 1; + + // UsersSlice is the list of users who can perform the action + // +k8s:conversion-gen=false + repeated string users = 2; + + // GroupsSlice is the list of groups who can perform the action + // +k8s:conversion-gen=false + repeated string groups = 3; + + // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. + // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + optional string evalutionError = 4; +} + +// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Role { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a Role, but does not contain it. It can reference any Role in the same namespace or in the global namespace. +// It adds 'who' information via either (Users and Groups) or Subjects, and namespace information via the namespace in which it exists. +// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleBinding { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // userNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames userNames = 2; + + // groupNames holds all the groups directly bound to the role. 
+ // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames groupNames = 3; + + // subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + repeated .k8s.io.api.core.v1.ObjectReference subjects = 4; + + // roleRef can only reference the current namespace and the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + optional .k8s.io.api.core.v1.ObjectReference roleRef = 5; +} + +// RoleBindingList is a collection of RoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleBindingList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleBindingRestriction is an object that can be matched against a subject +// (user, group, or service account) to determine whether rolebindings on that +// subject are allowed in the namespace to which the RoleBindingRestriction +// belongs. If any one of those RoleBindingRestriction objects matches +// a subject, rolebindings on that subject in the namespace are allowed. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=rolebindingrestrictions,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +message RoleBindingRestriction { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the matcher. + optional RoleBindingRestrictionSpec spec = 2; +} + +// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleBindingRestrictionList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of RoleBindingRestriction objects. 
+ repeated RoleBindingRestriction items = 2; +} + +// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one +// field must be non-nil. +message RoleBindingRestrictionSpec { + // userrestriction matches against user subjects. + // +nullable + optional UserRestriction userrestriction = 1; + + // grouprestriction matches against group subjects. + // +nullable + optional GroupRestriction grouprestriction = 2; + + // serviceaccountrestriction matches against service-account subjects. + // +nullable + optional ServiceAccountRestriction serviceaccountrestriction = 3; +} + +// RoleList is a collection of Roles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of Roles + repeated Role items = 2; +} + +// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SelfSubjectRulesReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + + // spec adds information about how to conduct the check + optional SelfSubjectRulesReviewSpec spec = 1; + + // status is completed by the server to tell which permissions you have + optional SubjectRulesReviewStatus status = 2; +} + +// SelfSubjectRulesReviewSpec adds information about how to conduct the check +message SelfSubjectRulesReviewSpec { + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil means "use the scopes on this request". + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 1; +} + +// ServiceAccountReference specifies a service account and namespace by their +// names. +message ServiceAccountReference { + // name is the name of the service account. + optional string name = 1; + + // namespace is the namespace of the service account. Service accounts from + // inside the whitelisted namespaces are allowed to be bound to roles. If + // Namespace is empty, then the namespace of the RoleBindingRestriction in + // which the ServiceAccountReference is embedded is used. + optional string namespace = 2; +} + +// ServiceAccountRestriction matches a service account by a string match on +// either the service-account name or the name of the service account's +// namespace. +message ServiceAccountRestriction { + // serviceaccounts specifies a list of literal service-account names. + repeated ServiceAccountReference serviceaccounts = 1; + + // namespaces specifies a list of literal namespace names. + repeated string namespaces = 2; +} + +// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +message SubjectAccessReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 5; + + // Action describes the action being tested. + optional Action Action = 1; + + // user is optional. If both User and Groups are empty, the current authenticated user is used. + optional string user = 2; + + // GroupsSlice is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + repeated string groups = 3; + + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 4; +} + +// SubjectAccessReviewResponse describes whether or not a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SubjectAccessReviewResponse { + // namespace is the namespace used for the access review + optional string namespace = 1; + + // allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 2; + + // reason is optional. It indicates why a request was allowed or denied. + optional string reason = 3; + + // evaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + optional string evaluationError = 4; +} + +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SubjectRulesReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + + // spec adds information about how to conduct the check + optional SubjectRulesReviewSpec spec = 1; + + // status is completed by the server to tell which permissions you have + optional SubjectRulesReviewStatus status = 2; +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +message SubjectRulesReviewSpec { + // user is optional. At least one of User and Groups must be specified. + optional string user = 1; + + // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + repeated string groups = 2; + + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ optional OptionalScopes scopes = 3; +} + +// SubjectRulesReviewStatus contains the result of a rules check +message SubjectRulesReviewStatus { + // rules is the list of rules (no particular sort) that are allowed for the subject + repeated PolicyRule rules = 1; + + // evaluationError can appear in combination with Rules. It means some error happened during evaluation + // that may have prevented additional rules from being populated. + optional string evaluationError = 2; +} + +// UserRestriction matches a user either by a string match on the user name, +// a string match on the name of a group to which the user belongs, or a label +// selector applied to the user labels. +message UserRestriction { + // users specifies a list of literal user names. + repeated string users = 1; + + // groups specifies a list of literal group names. + // +nullable + repeated string groups = 2; + + // Selectors specifies a list of label selectors over user labels. + // +nullable + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 3; +} + diff --git a/vendor/github.com/openshift/api/authorization/v1/legacy.go b/vendor/github.com/openshift/api/authorization/v1/legacy.go new file mode 100644 index 0000000000000..f437a242ea8cb --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/legacy.go @@ -0,0 +1,43 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, + &ResourceAccessReview{}, + &SubjectAccessReview{}, + &LocalResourceAccessReview{}, + &LocalSubjectAccessReview{}, + &ResourceAccessReviewResponse{}, + &SubjectAccessReviewResponse{}, + &IsPersonalSubjectAccessReview{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &RoleBindingRestriction{}, + &RoleBindingRestrictionList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...)
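+ // All of the types above are registered under legacyGroupVersion, the
+ // group-less ("", v1) GroupVersion declared at the top of this file, so
+ // clients that still speak the pre-API-group form of these resources can
+ // continue to encode and decode them.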
+ return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/register.go b/vendor/github.com/openshift/api/authorization/v1/register.go new file mode 100644 index 0000000000000..f1e12477b6516 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/register.go @@ -0,0 +1,60 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "authorization.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, + &ResourceAccessReview{}, + &SubjectAccessReview{}, + &LocalResourceAccessReview{}, + &LocalSubjectAccessReview{}, + &ResourceAccessReviewResponse{}, + &SubjectAccessReviewResponse{}, + &IsPersonalSubjectAccessReview{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &RoleBindingRestriction{}, + &RoleBindingRestrictionList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go new file mode 100644 index 0000000000000..bf4071867f389 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -0,0 +1,661 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" +) + +// Authorization is calculated against +// 1. all deny RoleBinding PolicyRules in the master namespace - short circuit on match +// 2. all allow RoleBinding PolicyRules in the master namespace - short circuit on match +// 3. all deny RoleBinding PolicyRules in the namespace - short circuit on match +// 4. all allow RoleBinding PolicyRules in the namespace - short circuit on match +// 5. deny by default + +const ( + // GroupKind is string representation of kind used in role binding subjects that represents the "group". + GroupKind = "Group" + // UserKind is string representation of kind used in role binding subjects that represents the "user". + UserKind = "User" + + ScopesKey = "scopes.authorization.openshift.io" +) + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. 
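+// As a rough, non-normative sketch (not part of the vendored source), the
+// evaluation order described at the top of this file amounts to:
+//
+//	func authorize(denyMaster, allowMaster, denyLocal, allowLocal []PolicyRule, act Action) bool {
+//		for _, r := range denyMaster { if matches(r, act) { return false } } // 1. master-namespace deny
+//		for _, r := range allowMaster { if matches(r, act) { return true } } // 2. master-namespace allow
+//		for _, r := range denyLocal { if matches(r, act) { return false } }  // 3. namespace deny
+//		for _, r := range allowLocal { if matches(r, act) { return true } }  // 4. namespace allow
+//		return false                                                         // 5. deny by default
+//	}
+//
+// Here matches is a hypothetical predicate that tests an Action against a
+// single rule; the real authorizer lives outside this package.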
+type PolicyRule struct { + // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + // +kubebuilder:pruning:PreserveUnknownFields + AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` + // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request + // will be allowed + // +optional + // +nullable + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` + // resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` + // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + NonResourceURLsSlice []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type IsPersonalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Role struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. 
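+// As an illustrative aside (not from the vendored source): with encoding/json,
+// a nil slice marshals to null while an empty one marshals to [], so the two
+// states survive a round trip:
+//
+//	var unset OptionalNames      // marshals to null ("unset")
+//	empty := OptionalNames{}     // marshals to [] ("explicitly empty")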
+// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNames []string + +func (t OptionalNames) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding references a Role, but does not contain it. It can reference any Role in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // userNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` + // groupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` + // subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` + + // roleRef can only reference the current namespace and the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role.
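+ // As a purely illustrative example (the role name is hypothetical):
+ // RoleRef: corev1.ObjectReference{Name: "edit"} with an empty Namespace
+ // refers to the role named "edit" in the global namespace.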
+ RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` +} + +// NamedRole relates a Role with a name +type NamedRole struct { + // name is the name of the role + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // role is the role being named + Role Role `json:"role" protobuf:"bytes,2,opt,name=role"` +} + +// NamedRoleBinding relates a role binding with a name +type NamedRoleBinding struct { + // name is the name of the role binding + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // roleBinding is the role binding being named + RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + + // spec adds information about how to conduct the check + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectRulesReviewSpec adds information about how to conduct the check +type SelfSubjectRulesReviewSpec struct { + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil means "use the scopes on this request". + // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + + // spec adds information about how to conduct the check + Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +type SubjectRulesReviewSpec struct { + // user is optional. At least one of User and Groups must be specified. + User string `json:"user" protobuf:"bytes,1,opt,name=user"` + // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. 
+ Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"` +} + +// SubjectRulesReviewStatus contains the result of a rules check +type SubjectRulesReviewStatus struct { + // rules is the list of rules (no particular sort) that are allowed for the subject + Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` + // evaluationError can appear in combination with Rules. It means some error happened during evaluation + // that may have prevented additional rules from being populated. + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceAccessReviewResponse describes who can perform the action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ResourceAccessReviewResponse struct { + metav1.TypeMeta `json:",inline"` + + // namespace is the namespace used for the access review + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // UsersSlice is the list of users who can perform the action + // +k8s:conversion-gen=false + UsersSlice []string `json:"users" protobuf:"bytes,2,rep,name=users"` + // GroupsSlice is the list of groups who can perform the action + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + + // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. + // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + EvaluationError string `json:"evalutionError" protobuf:"bytes,4,opt,name=evalutionError"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=apply,applyStatus,get,list,create,update,updateStatus,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the +// action specified by spec +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ResourceAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` + + // Action describes the action being tested. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectAccessReviewResponse describes whether or not a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +type SubjectAccessReviewResponse struct { + metav1.TypeMeta `json:",inline"` + + // namespace is the namespace used for the access review + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + // reason is optional. It indicates why a request was allowed or denied. + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // evaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` +} + +// OptionalScopes is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalScopes []string + +func (t OptionalScopes) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=apply,applyStatus,get,list,create,update,updateStatus,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,5,opt,name=metadata"` + + // Action describes the action being tested. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` + // user is optional. If both User and Groups are empty, the current authenticated user is used. + User string `json:"user" protobuf:"bytes,2,opt,name=user"` + // GroupsSlice is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty.
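+ // As an illustrative, non-normative example (all names hypothetical), a
+ // review limited to the "user:info" scope could be built as:
+ //
+ //	sar := SubjectAccessReview{
+ //		Action: Action{Namespace: "demo", Verb: "get", Resource: "pods"},
+ //		User:   "alice",
+ //		Scopes: OptionalScopes{"user:info"},
+ //	}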
+ // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"` +} + +// +genclient +// +genclient:skipVerbs=apply,applyStatus,get,list,create,update,updateStatus,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type LocalResourceAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` +} + +// +genclient +// +genclient:skipVerbs=apply,applyStatus,get,list,create,update,updateStatus,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,5,opt,name=metadata"` + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` + // user is optional. If both User and Groups are empty, the current authenticated user is used. + User string `json:"user" protobuf:"bytes,2,opt,name=user"` + // groups is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"` +} + +// Action describes a request to the API server +type Action struct { + // namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // verb is one of: get, list, watch, create, update, delete + Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"` + // Group is the API group of the resource + // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined + Group string `json:"resourceAPIGroup" protobuf:"bytes,3,opt,name=resourceAPIGroup"` + // Version is the API version of the resource + // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined + Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"` + // resource is one of the existing resource types + Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"` + // resourceName is the name of the resource being requested for a "get" or deleted for a "delete" + ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"` + // path is the path of a non resource URL + Path string `json:"path" protobuf:"bytes,8,opt,name=path"` + // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"` + // content is the actual content of the request for create and update + // +kubebuilder:pruning:PreserveUnknownFields + Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingList is a collection of RoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleList is a collection of Roles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // userNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` + // groupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` + // subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` + + // roleRef can only reference the current namespace and the global namespace. + // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role.
+ RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` +} + +// NamedClusterRole relates a name with a cluster role +type NamedClusterRole struct { + // name is the name of the cluster role + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // role is the cluster role being named + Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"` +} + +// NamedClusterRoleBinding relates a name with a cluster role binding +type NamedClusterRoleBinding struct { + // name is the name of the cluster role binding + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // roleBinding is the cluster role binding being named + RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleList is a collection of ClusterRoles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingRestriction is an object that can be matched against a subject +// (user, group, or service account) to determine whether rolebindings on that +// subject are allowed in the namespace to which the RoleBindingRestriction +// belongs. If any one of those RoleBindingRestriction objects matches +// a subject, rolebindings on that subject in the namespace are allowed. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=rolebindingrestrictions,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type RoleBindingRestriction struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the matcher. + Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one +// field must be non-nil. +type RoleBindingRestrictionSpec struct { + // userrestriction matches against user subjects. + // +nullable + UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"` + + // grouprestriction matches against group subjects. + // +nullable + GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"` + + // serviceaccountrestriction matches against service-account subjects. + // +nullable + ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBindingRestrictionList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of RoleBindingRestriction objects. + Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// UserRestriction matches a user either by a string match on the user name, +// a string match on the name of a group to which the user belongs, or a label +// selector applied to the user labels. +type UserRestriction struct { + // users specifies a list of literal user names. + Users []string `json:"users" protobuf:"bytes,1,rep,name=users"` + + // groups specifies a list of literal group names. + // +nullable + Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` + + // Selectors specifies a list of label selectors over user labels. + // +nullable + Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,3,rep,name=labels"` +} + +// GroupRestriction matches a group either by a string match on the group name +// or a label selector applied to group labels. +type GroupRestriction struct { + // groups is a list of groups used to match against an individual user's + // groups. If the user is a member of one of the whitelisted groups, the user + // is allowed to be bound to a role. + // +nullable + Groups []string `json:"groups" protobuf:"bytes,1,rep,name=groups"` + + // Selectors specifies a list of label selectors over group labels. + // +nullable + Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,2,rep,name=labels"` +} + +// ServiceAccountRestriction matches a service account by a string match on +// either the service-account name or the name of the service account's +// namespace. +type ServiceAccountRestriction struct { + // serviceaccounts specifies a list of literal service-account names. 
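+ // For example (hypothetical name): a ServiceAccounts entry of
+ // ServiceAccountReference{Name: "builder"} matches the "builder" service
+ // account in the restriction's own namespace, since an empty Namespace falls
+ // back to the namespace of the enclosing RoleBindingRestriction (see
+ // ServiceAccountReference).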
+ ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"` + + // namespaces specifies a list of literal namespace names. + Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// ServiceAccountReference specifies a service account and namespace by their +// names. +type ServiceAccountReference struct { + // name is the name of the service account. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // namespace is the namespace of the service account. Service accounts from + // inside the whitelisted namespaces are allowed to be bound to roles. If + // Namespace is empty, then the namespace of the RoleBindingRestriction in + // which the ServiceAccountReference is embedded is used. + Namespace string `json:"namespace" protobuf:"bytes,2,opt,name=namespace"` +} diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..9b7d44f3b2fbf --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,1000 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Action) DeepCopyInto(out *Action) { + *out = *in + in.Content.DeepCopyInto(&out.Content) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Action. +func (in *Action) DeepCopy() *Action { + if in == nil { + return nil + } + out := new(Action) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + *out = new(rbacv1.AggregationRule) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.GroupNames != nil { + in, out := &in.GroupNames, &out.GroupNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupRestriction) DeepCopyInto(out *GroupRestriction) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupRestriction. +func (in *GroupRestriction) DeepCopy() *GroupRestriction { + if in == nil { + return nil + } + out := new(GroupRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsPersonalSubjectAccessReview) DeepCopyInto(out *IsPersonalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsPersonalSubjectAccessReview. +func (in *IsPersonalSubjectAccessReview) DeepCopy() *IsPersonalSubjectAccessReview { + if in == nil { + return nil + } + out := new(IsPersonalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IsPersonalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalResourceAccessReview) DeepCopyInto(out *LocalResourceAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResourceAccessReview. +func (in *LocalResourceAccessReview) DeepCopy() *LocalResourceAccessReview { + if in == nil { + return nil + } + out := new(LocalResourceAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalResourceAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Action.DeepCopyInto(&out.Action) + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedClusterRole) DeepCopyInto(out *NamedClusterRole) { + *out = *in + in.Role.DeepCopyInto(&out.Role) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRole. +func (in *NamedClusterRole) DeepCopy() *NamedClusterRole { + if in == nil { + return nil + } + out := new(NamedClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedClusterRoleBinding) DeepCopyInto(out *NamedClusterRoleBinding) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRoleBinding. +func (in *NamedClusterRoleBinding) DeepCopy() *NamedClusterRoleBinding { + if in == nil { + return nil + } + out := new(NamedClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRole) DeepCopyInto(out *NamedRole) { + *out = *in + in.Role.DeepCopyInto(&out.Role) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRole. +func (in *NamedRole) DeepCopy() *NamedRole { + if in == nil { + return nil + } + out := new(NamedRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRoleBinding) DeepCopyInto(out *NamedRoleBinding) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRoleBinding. +func (in *NamedRoleBinding) DeepCopy() *NamedRoleBinding { + if in == nil { + return nil + } + out := new(NamedRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNames) DeepCopyInto(out *OptionalNames) { + { + in := &in + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames. +func (in OptionalNames) DeepCopy() OptionalNames { + if in == nil { + return nil + } + out := new(OptionalNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalScopes) DeepCopyInto(out *OptionalScopes) { + { + in := &in + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalScopes. +func (in OptionalScopes) DeepCopy() OptionalScopes { + if in == nil { + return nil + } + out := new(OptionalScopes) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AttributeRestrictions.DeepCopyInto(&out.AttributeRestrictions) + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLsSlice != nil { + in, out := &in.NonResourceURLsSlice, &out.NonResourceURLsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAccessReview) DeepCopyInto(out *ResourceAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReview. +func (in *ResourceAccessReview) DeepCopy() *ResourceAccessReview { + if in == nil { + return nil + } + out := new(ResourceAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAccessReviewResponse) DeepCopyInto(out *ResourceAccessReviewResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UsersSlice != nil { + in, out := &in.UsersSlice, &out.UsersSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReviewResponse. +func (in *ResourceAccessReviewResponse) DeepCopy() *ResourceAccessReviewResponse { + if in == nil { + return nil + } + out := new(ResourceAccessReviewResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceAccessReviewResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. 
+func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.GroupNames != nil { + in, out := &in.GroupNames, &out.GroupNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingRestriction) DeepCopyInto(out *RoleBindingRestriction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestriction. +func (in *RoleBindingRestriction) DeepCopy() *RoleBindingRestriction { + if in == nil { + return nil + } + out := new(RoleBindingRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingRestriction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
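+//
+// Illustrative sketch, not part of the generated code: for list types the Items
+// slice is cloned element by element, so each item in the copy is independent
+// of its counterpart in the original:
+//
+//	dst := new(RoleBindingRestrictionList)
+//	src.DeepCopyInto(dst) // assumes src is a non-nil *RoleBindingRestrictionList
+//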
+func (in *RoleBindingRestrictionList) DeepCopyInto(out *RoleBindingRestrictionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBindingRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionList. +func (in *RoleBindingRestrictionList) DeepCopy() *RoleBindingRestrictionList { + if in == nil { + return nil + } + out := new(RoleBindingRestrictionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingRestrictionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingRestrictionSpec) DeepCopyInto(out *RoleBindingRestrictionSpec) { + *out = *in + if in.UserRestriction != nil { + in, out := &in.UserRestriction, &out.UserRestriction + *out = new(UserRestriction) + (*in).DeepCopyInto(*out) + } + if in.GroupRestriction != nil { + in, out := &in.GroupRestriction, &out.GroupRestriction + *out = new(GroupRestriction) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRestriction != nil { + in, out := &in.ServiceAccountRestriction, &out.ServiceAccountRestriction + *out = new(ServiceAccountRestriction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionSpec. +func (in *RoleBindingRestrictionSpec) DeepCopy() *RoleBindingRestrictionSpec { + if in == nil { + return nil + } + out := new(RoleBindingRestrictionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. 
+func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountReference) DeepCopyInto(out *ServiceAccountReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountReference. +func (in *ServiceAccountReference) DeepCopy() *ServiceAccountReference { + if in == nil { + return nil + } + out := new(ServiceAccountReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountRestriction) DeepCopyInto(out *ServiceAccountRestriction) { + *out = *in + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = make([]ServiceAccountReference, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRestriction. +func (in *ServiceAccountRestriction) DeepCopy() *ServiceAccountRestriction { + if in == nil { + return nil + } + out := new(ServiceAccountRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Action.DeepCopyInto(&out.Action) + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
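+//
+// Illustrative sketch, not part of the generated code: DeepCopyObject is the
+// method that satisfies runtime.Object, letting generic API machinery clone a
+// value without knowing its concrete type:
+//
+//	var obj runtime.Object = sar // assumes sar is a *SubjectAccessReview
+//	copied := obj.DeepCopyObject()
+//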
+func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewResponse) DeepCopyInto(out *SubjectAccessReviewResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewResponse. +func (in *SubjectAccessReviewResponse) DeepCopy() *SubjectAccessReviewResponse { + if in == nil { + return nil + } + out := new(SubjectAccessReviewResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReviewResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReview) DeepCopyInto(out *SubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReview. +func (in *SubjectRulesReview) DeepCopy() *SubjectRulesReview { + if in == nil { + return nil + } + out := new(SubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewSpec) DeepCopyInto(out *SubjectRulesReviewSpec) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewSpec. +func (in *SubjectRulesReviewSpec) DeepCopy() *SubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserRestriction) DeepCopyInto(out *UserRestriction) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserRestriction. +func (in *UserRestriction) DeepCopy() *UserRestriction { + if in == nil { + return nil + } + out := new(UserRestriction) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/authorization/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..c708c0fa0219b --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,22 @@ +rolebindingrestrictions.authorization.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: rolebindingrestrictions.authorization.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: authorization.openshift.io + HasStatus: false + KindName: RoleBindingRestriction + Labels: {} + PluralName: rolebindingrestrictions + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..a1c28a3ec127e --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,370 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Action = map[string]string{ + "": "Action describes a request to the API server", + "namespace": "namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces", + "verb": "verb is one of: get, list, watch, create, update, delete", + "resourceAPIGroup": "Group is the API group of the resource. Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined", + "resourceAPIVersion": "Version is the API version of the resource. Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined", + "resource": "resource is one of the existing resource types", + "resourceName": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "path": "path is the path of a non resource URL", + "isNonResourceURL": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "content": "content is the actual content of the request for create and update", +} + +func (Action) SwaggerDoc() map[string]string { + return map_Action +} + +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "rules": "rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. 
Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "roleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_GroupRestriction = map[string]string{ + "": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.", + "groups": "groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", + "labels": "Selectors specifies a list of label selectors over group labels.", +} + +func (GroupRestriction) SwaggerDoc() map[string]string { + return map_GroupRestriction +} + +var map_IsPersonalSubjectAccessReview = map[string]string{ + "": "IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (IsPersonalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_IsPersonalSubjectAccessReview +} + +var map_LocalResourceAccessReview = map[string]string{ + "": "LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (LocalResourceAccessReview) SwaggerDoc() map[string]string { + return map_LocalResourceAccessReview +} + +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "groups is optional. Groups is the list of groups to which the User belongs.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NamedClusterRole = map[string]string{ + "": "NamedClusterRole relates a name with a cluster role", + "name": "name is the name of the cluster role", + "role": "role is the cluster role being named", +} + +func (NamedClusterRole) SwaggerDoc() map[string]string { + return map_NamedClusterRole +} + +var map_NamedClusterRoleBinding = map[string]string{ + "": "NamedClusterRoleBinding relates a name with a cluster role binding", + "name": "name is the name of the cluster role binding", + "roleBinding": "roleBinding is the cluster role binding being named", +} + +func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { + return map_NamedClusterRoleBinding +} + +var map_NamedRole = map[string]string{ + "": "NamedRole relates a Role with a name", + "name": "name is the name of the role", + "role": "role is the role being named", +} + +func (NamedRole) SwaggerDoc() map[string]string { + return map_NamedRole +} + +var map_NamedRoleBinding = map[string]string{ + "": "NamedRoleBinding relates a role binding with a name", + "name": "name is the name of the role binding", + "roleBinding": "roleBinding is the role binding being named", +} + +func (NamedRoleBinding) SwaggerDoc() map[string]string { + return map_NamedRoleBinding +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "attributeRestrictions": "attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", + "resources": "resources is a list of resources this rule applies to. 
ResourceAll represents all resources.", + "resourceNames": "resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_ResourceAccessReview = map[string]string{ + "": "ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ResourceAccessReview) SwaggerDoc() map[string]string { + return map_ResourceAccessReview +} + +var map_ResourceAccessReviewResponse = map[string]string{ + "": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "namespace": "namespace is the namespace used for the access review", + "users": "UsersSlice is the list of users who can perform the action", + "groups": "GroupsSlice is the list of groups who can perform the action", + "evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", +} + +func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string { + return map_ResourceAccessReviewResponse +} + +var map_Role = map[string]string{ + "": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "rules": "rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a Role, but does not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "userNames": "userNames holds all the usernames directly bound to the role. 
This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "roleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleBindingRestriction = map[string]string{ + "": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the matcher.", +} + +func (RoleBindingRestriction) SwaggerDoc() map[string]string { + return map_RoleBindingRestriction +} + +var map_RoleBindingRestrictionList = map[string]string{ + "": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of RoleBindingRestriction objects.", +} + +func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { + return map_RoleBindingRestrictionList +} + +var map_RoleBindingRestrictionSpec = map[string]string{ + "": "RoleBindingRestrictionSpec defines a rolebinding restriction. 
Exactly one field must be non-nil.", + "userrestriction": "userrestriction matches against user subjects.", + "grouprestriction": "grouprestriction matches against group subjects.", + "serviceaccountrestriction": "serviceaccountrestriction matches against service-account subjects.", +} + +func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { + return map_RoleBindingRestrictionSpec +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_SelfSubjectRulesReview = map[string]string{ + "": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec adds information about how to conduct the check", + "status": "status is completed by the server to tell which permissions you have", +} + +func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReview +} + +var map_SelfSubjectRulesReviewSpec = map[string]string{ + "": "SelfSubjectRulesReviewSpec adds information about how to conduct the check", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", +} + +func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReviewSpec +} + +var map_ServiceAccountReference = map[string]string{ + "": "ServiceAccountReference specifies a service account and namespace by their names.", + "name": "name is the name of the service account.", + "namespace": "namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", +} + +func (ServiceAccountReference) SwaggerDoc() map[string]string { + return map_ServiceAccountReference +} + +var map_ServiceAccountRestriction = map[string]string{ + "": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.", + "serviceaccounts": "serviceaccounts specifies a list of literal service-account names.", + "namespaces": "namespaces specifies a list of literal namespace names.", +} + +func (ServiceAccountRestriction) SwaggerDoc() map[string]string { + return map_ServiceAccountRestriction +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewResponse = map[string]string{ + "": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "namespace": "namespace is the namespace used for the access review", + "allowed": "allowed is required. True if the action would be allowed, false otherwise.", + "reason": "reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "evaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewResponse +} + +var map_SubjectRulesReview = map[string]string{ + "": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec adds information about how to conduct the check", + "status": "status is completed by the server to tell which permissions you have", +} + +func (SubjectRulesReview) SwaggerDoc() map[string]string { + return map_SubjectRulesReview +} + +var map_SubjectRulesReviewSpec = map[string]string{ + "": "SubjectRulesReviewSpec adds information about how to conduct the check", + "user": "user is optional. At least one of User and Groups must be specified.", + "groups": "groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".", +} + +func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewSpec +} + +var map_SubjectRulesReviewStatus = map[string]string{ + "": "SubjectRulesReviewStatus contains the result of a rules check", + "rules": "rules is the list of rules (no particular sort) that are allowed for the subject", + "evaluationError": "evaluationError can appear in combination with Rules. 
It means some error happened during evaluation that may have prevented additional rules from being populated.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + +var map_UserRestriction = map[string]string{ + "": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.", + "users": "users specifies a list of literal user names.", + "groups": "groups specifies a list of literal group names.", + "labels": "Selectors specifies a list of label selectors over user labels.", +} + +func (UserRestriction) SwaggerDoc() map[string]string { + return map_UserRestriction +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/build/v1/consts.go b/vendor/github.com/openshift/api/build/v1/consts.go new file mode 100644 index 0000000000000..0d9c8f03b3035 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/consts.go @@ -0,0 +1,202 @@ +package v1 + +// annotations +const ( + // BuildAnnotation is an annotation that identifies a Pod as being for a Build + BuildAnnotation = "openshift.io/build.name" + + // BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from + BuildConfigAnnotation = "openshift.io/build-config.name" + + // BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from + BuildCloneAnnotation = "openshift.io/build.clone-of" + + // BuildNumberAnnotation is an annotation whose value is the sequential number for this Build + BuildNumberAnnotation = "openshift.io/build.number" + + // BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build + BuildPodNameAnnotation = "openshift.io/build.pod-name" + + // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information + BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json" + + // BuildJenkinsLogURLAnnotation is an annotation holding a link to the raw Jenkins build console log + BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url" + + // BuildJenkinsConsoleLogURLAnnotation is an annotation holding a link to the Jenkins build console log (including Jenkins chrome wrappering) + BuildJenkinsConsoleLogURLAnnotation = "openshift.io/jenkins-console-log-url" + + // BuildJenkinsBlueOceanLogURLAnnotation is an annotation holding a link to the Jenkins build console log via the Jenkins BlueOcean UI Plugin + BuildJenkinsBlueOceanLogURLAnnotation = "openshift.io/jenkins-blueocean-log-url" + + // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build + BuildJenkinsBuildURIAnnotation = "openshift.io/jenkins-build-uri" + + // BuildSourceSecretMatchURIAnnotationPrefix is a prefix for annotations on a Secret which indicate a source URI against which the Secret can be used + BuildSourceSecretMatchURIAnnotationPrefix = "build.openshift.io/source-secret-match-uri-" + + // BuildConfigPausedAnnotation is an annotation that marks a BuildConfig as paused. + // New Builds cannot be instantiated from a paused BuildConfig. + BuildConfigPausedAnnotation = "openshift.io/build-config.paused" +) + +// labels +const ( + // BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig + // on which the Build is based. 
NOTE: The value for this label may not contain the entire + // BuildConfig name because it will be truncated to maximum label length. + BuildConfigLabel = "openshift.io/build-config.name" + + // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run. + // NOTE: The value for this label may not contain the entire Build name because it will be + // truncated to maximum label length. + BuildLabel = "openshift.io/build.name" + + // BuildRunPolicyLabel represents the start policy used to start the build. + BuildRunPolicyLabel = "openshift.io/build.start-policy" + + // BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces. + // We keep it for backward compatibility. + BuildConfigLabelDeprecated = "buildconfig" +) + +const ( + // StatusReasonError is a generic reason for a build error condition. + StatusReasonError StatusReason = "Error" + + // StatusReasonCannotCreateBuildPodSpec is an error condition when the build + // strategy cannot create a build pod spec. + StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec" + + // StatusReasonCannotCreateBuildPod is an error condition when a build pod + // cannot be created. + StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod" + + // StatusReasonInvalidOutputReference is an error condition when the build + // output is an invalid reference. + StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference" + + // StatusReasonInvalidImageReference is an error condition when the build + // references an invalid image. + StatusReasonInvalidImageReference StatusReason = "InvalidImageReference" + + // StatusReasonCancelBuildFailed is an error condition when cancelling a build + // fails. + StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed" + + // StatusReasonBuildPodDeleted is an error condition when the build pod is + // deleted before build completion. + StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted" + + // StatusReasonExceededRetryTimeout is an error condition when the build has + // not completed and retrying the build times out. + StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout" + + // StatusReasonMissingPushSecret indicates that the build is missing the required + // secret for pushing the output image. + // The build will stay in the pending state until the secret is created, or the build times out. + StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" + + // StatusReasonPostCommitHookFailed indicates the post-commit hook failed. + StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed" + + // StatusReasonPushImageToRegistryFailed indicates that an image failed to be + // pushed to the registry. + StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed" + + // StatusReasonPullBuilderImageFailed indicates that we failed to pull the + // builder image. + StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed" + + // StatusReasonFetchSourceFailed indicates that fetching the source of the + // build has failed. + StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed" + + // StatusReasonFetchImageContentFailed indicates that the fetching of an image and extracting + // its contents for inclusion in the build has failed. + StatusReasonFetchImageContentFailed StatusReason = "FetchImageContentFailed" + + // StatusReasonManageDockerfileFailed indicates that the setup of the Dockerfile for the build + // has failed. 
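+ //
+ // Illustrative sketch, not part of the generated code: StatusReason values are
+ // plain string constants, so callers typically compare or switch on them, e.g.
+ // (assuming a Build value named build whose Status.Reason holds one):
+ //
+ //	switch build.Status.Reason {
+ //	case StatusReasonOutOfMemoryKilled, StatusReasonBuildPodEvicted:
+ //		// treat the failure as retryable
+ //	}
+ //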
+ StatusReasonManageDockerfileFailed StatusReason = "ManageDockerfileFailed" + + // StatusReasonInvalidContextDirectory indicates that the supplied + // contextDir does not exist + StatusReasonInvalidContextDirectory StatusReason = "InvalidContextDirectory" + + // StatusReasonCancelledBuild indicates that the build was cancelled by the + // user. + StatusReasonCancelledBuild StatusReason = "CancelledBuild" + + // StatusReasonDockerBuildFailed indicates that the container image build strategy has + // failed. + StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed" + + // StatusReasonBuildPodExists indicates that the build tried to create a + // build pod but one was already present. + StatusReasonBuildPodExists StatusReason = "BuildPodExists" + + // StatusReasonNoBuildContainerStatus indicates that the build failed because + // the build pod has no container statuses. + StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus" + + // StatusReasonFailedContainer indicates that the pod for the build has at least + // one container with a non-zero exit status. + StatusReasonFailedContainer StatusReason = "FailedContainer" + + // StatusReasonUnresolvableEnvironmentVariable indicates that an error occurred processing + // the supplied options for environment variables in the build strategy environment + StatusReasonUnresolvableEnvironmentVariable StatusReason = "UnresolvableEnvironmentVariable" + + // StatusReasonGenericBuildFailed is the reason associated with a broad + // range of build failures. + StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed" + + // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption + StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled" + + // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure + // to look up the service account associated with the BuildConfig. + StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount" + + // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted + // from its node + StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" +) + +// WhitelistEnvVarNames is a list of environment variable names that are allowed to be specified +// in a buildconfig and merged into the created build pods; the code for this is located in +// openshift/openshift-controller-manager +var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "GIT_LFS_SKIP_SMUDGE", "LANG", + "HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY", "http_proxy", "https_proxy", "no_proxy"} + +// env vars +const ( + + // CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when + // performing a custom build, if needed. + CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE" + + // AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in + // Source builder images + AllowedUIDs = "ALLOWED_UIDS" + // DropCapabilities is an environment variable that contains a list of capabilities to drop when + // executing a Source build + DropCapabilities = "DROP_CAPS" +) + +// keys inside of secrets and configmaps +const ( + // WebHookSecretKey is the key used to identify the value containing the webhook invocation + // secret within a secret referenced by a webhook trigger. 
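+ //
+ // Illustrative sketch, not part of the generated code, assuming a corev1.Secret
+ // named secret that a webhook trigger references:
+ //
+ //	token := string(secret.Data[WebHookSecretKey])
+ //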
+ WebHookSecretKey = "WebHookSecretKey" + + // RegistryConfKey is the ConfigMap key for the build pod's registry configuration file. + RegistryConfKey = "registries.conf" + + // SignaturePolicyKey is the ConfigMap key for the build pod's image signature policy file. + SignaturePolicyKey = "policy.json" + + // ServiceCAKey is the ConfigMap key for the service signing certificate authority mounted into build pods. + ServiceCAKey = "service-ca.crt" +) diff --git a/vendor/github.com/openshift/api/build/v1/doc.go b/vendor/github.com/openshift/api/build/v1/doc.go new file mode 100644 index 0000000000000..9bc16f64b2359 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/apis/build +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=build.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/build/v1/generated.pb.go b/vendor/github.com/openshift/api/build/v1/generated.pb.go new file mode 100644 index 0000000000000..1b026f354dc64 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/generated.pb.go @@ -0,0 +1,17545 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/build/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
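+//
+// Illustrative sketch, not part of the generated code: each generated type
+// implements proto.Message, so values round-trip through the gogo helpers,
+// e.g. (assuming opts is a *BinaryBuildRequestOptions):
+//
+//	data, err := proto.Marshal(opts)
+//	if err == nil {
+//		var decoded BinaryBuildRequestOptions
+//		err = proto.Unmarshal(data, &decoded)
+//	}
+//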
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} } +func (*BinaryBuildRequestOptions) ProtoMessage() {} +func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{0} +} +func (m *BinaryBuildRequestOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildRequestOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildRequestOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildRequestOptions.Merge(m, src) +} +func (m *BinaryBuildRequestOptions) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildRequestOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildRequestOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildRequestOptions proto.InternalMessageInfo + +func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} } +func (*BinaryBuildSource) ProtoMessage() {} +func (*BinaryBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{1} +} +func (m *BinaryBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildSource.Merge(m, src) +} +func (m *BinaryBuildSource) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildSource proto.InternalMessageInfo + +func (m *BitbucketWebHookCause) Reset() { *m = BitbucketWebHookCause{} } +func (*BitbucketWebHookCause) ProtoMessage() {} +func (*BitbucketWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{2} +} +func (m *BitbucketWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketWebHookCause.Merge(m, src) +} +func (m *BitbucketWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *BitbucketWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketWebHookCause proto.InternalMessageInfo + +func (m *Build) Reset() { *m = Build{} } +func (*Build) ProtoMessage() {} +func (*Build) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{3} +} +func (m *Build) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Build) XXX_Merge(src proto.Message) { + xxx_messageInfo_Build.Merge(m, src) +} +func (m *Build) XXX_Size() int { + return m.Size() +} +func (m *Build) XXX_DiscardUnknown() { + xxx_messageInfo_Build.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Build proto.InternalMessageInfo + +func (m *BuildCondition) Reset() { *m = BuildCondition{} } +func (*BuildCondition) ProtoMessage() {} +func (*BuildCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{4} +} +func (m *BuildCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildCondition.Merge(m, src) +} +func (m *BuildCondition) XXX_Size() int { + return m.Size() +} +func (m *BuildCondition) XXX_DiscardUnknown() { + xxx_messageInfo_BuildCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildCondition proto.InternalMessageInfo + +func (m *BuildConfig) Reset() { *m = BuildConfig{} } +func (*BuildConfig) ProtoMessage() {} +func (*BuildConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{5} +} +func (m *BuildConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfig.Merge(m, src) +} +func (m *BuildConfig) XXX_Size() int { + return m.Size() +} +func (m *BuildConfig) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfig proto.InternalMessageInfo + +func (m *BuildConfigList) Reset() { *m = BuildConfigList{} } +func (*BuildConfigList) ProtoMessage() {} +func (*BuildConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{6} +} +func (m *BuildConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigList.Merge(m, src) +} +func (m *BuildConfigList) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigList proto.InternalMessageInfo + +func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} } +func (*BuildConfigSpec) ProtoMessage() {} +func (*BuildConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{7} +} +func (m *BuildConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigSpec.Merge(m, src) +} +func (m *BuildConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigSpec proto.InternalMessageInfo + +func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} } +func (*BuildConfigStatus) ProtoMessage() {} +func (*BuildConfigStatus) Descriptor() 
([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{8} +} +func (m *BuildConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigStatus.Merge(m, src) +} +func (m *BuildConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigStatus proto.InternalMessageInfo + +func (m *BuildList) Reset() { *m = BuildList{} } +func (*BuildList) ProtoMessage() {} +func (*BuildList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{9} +} +func (m *BuildList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildList.Merge(m, src) +} +func (m *BuildList) XXX_Size() int { + return m.Size() +} +func (m *BuildList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildList proto.InternalMessageInfo + +func (m *BuildLog) Reset() { *m = BuildLog{} } +func (*BuildLog) ProtoMessage() {} +func (*BuildLog) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{10} +} +func (m *BuildLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLog.Merge(m, src) +} +func (m *BuildLog) XXX_Size() int { + return m.Size() +} +func (m *BuildLog) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLog.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLog proto.InternalMessageInfo + +func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} } +func (*BuildLogOptions) ProtoMessage() {} +func (*BuildLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{11} +} +func (m *BuildLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLogOptions.Merge(m, src) +} +func (m *BuildLogOptions) XXX_Size() int { + return m.Size() +} +func (m *BuildLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLogOptions proto.InternalMessageInfo + +func (m *BuildOutput) Reset() { *m = BuildOutput{} } +func (*BuildOutput) ProtoMessage() {} +func (*BuildOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{12} +} +func (m *BuildOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildOutput.Merge(m, src) +} +func (m *BuildOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildOutput proto.InternalMessageInfo + +func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} } +func (*BuildPostCommitSpec) ProtoMessage() {} +func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{13} +} +func (m *BuildPostCommitSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildPostCommitSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildPostCommitSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildPostCommitSpec.Merge(m, src) +} +func (m *BuildPostCommitSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildPostCommitSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildPostCommitSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildPostCommitSpec proto.InternalMessageInfo + +func (m *BuildRequest) Reset() { *m = BuildRequest{} } +func (*BuildRequest) ProtoMessage() {} +func (*BuildRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{14} +} +func (m *BuildRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildRequest.Merge(m, src) +} +func (m *BuildRequest) XXX_Size() int { + return m.Size() +} +func (m *BuildRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildRequest proto.InternalMessageInfo + +func (m *BuildSource) Reset() { *m = BuildSource{} } +func (*BuildSource) ProtoMessage() {} +func (*BuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{15} +} +func (m *BuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSource.Merge(m, src) +} +func (m *BuildSource) XXX_Size() int { + return m.Size() +} +func (m *BuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSource proto.InternalMessageInfo + +func (m *BuildSpec) Reset() { *m = BuildSpec{} } +func (*BuildSpec) ProtoMessage() {} +func (*BuildSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{16} +} +func (m *BuildSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSpec.Merge(m, src) +} +func (m *BuildSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_BuildSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSpec proto.InternalMessageInfo + +func (m *BuildStatus) Reset() { *m = BuildStatus{} } +func (*BuildStatus) ProtoMessage() {} +func (*BuildStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{17} +} +func (m *BuildStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatus.Merge(m, src) +} +func (m *BuildStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatus proto.InternalMessageInfo + +func (m *BuildStatusOutput) Reset() { *m = BuildStatusOutput{} } +func (*BuildStatusOutput) ProtoMessage() {} +func (*BuildStatusOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{18} +} +func (m *BuildStatusOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutput.Merge(m, src) +} +func (m *BuildStatusOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutput proto.InternalMessageInfo + +func (m *BuildStatusOutputTo) Reset() { *m = BuildStatusOutputTo{} } +func (*BuildStatusOutputTo) ProtoMessage() {} +func (*BuildStatusOutputTo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{19} +} +func (m *BuildStatusOutputTo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutputTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutputTo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutputTo.Merge(m, src) +} +func (m *BuildStatusOutputTo) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutputTo) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutputTo.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutputTo proto.InternalMessageInfo + +func (m *BuildStrategy) Reset() { *m = BuildStrategy{} } +func (*BuildStrategy) ProtoMessage() {} +func (*BuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{20} +} +func (m *BuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStrategy.Merge(m, src) +} +func (m *BuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *BuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStrategy proto.InternalMessageInfo + +func (m 
*BuildTriggerCause) Reset() { *m = BuildTriggerCause{} } +func (*BuildTriggerCause) ProtoMessage() {} +func (*BuildTriggerCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{21} +} +func (m *BuildTriggerCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerCause.Merge(m, src) +} +func (m *BuildTriggerCause) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerCause) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerCause proto.InternalMessageInfo + +func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} } +func (*BuildTriggerPolicy) ProtoMessage() {} +func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{22} +} +func (m *BuildTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerPolicy.Merge(m, src) +} +func (m *BuildTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerPolicy proto.InternalMessageInfo + +func (m *BuildVolume) Reset() { *m = BuildVolume{} } +func (*BuildVolume) ProtoMessage() {} +func (*BuildVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{23} +} +func (m *BuildVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolume.Merge(m, src) +} +func (m *BuildVolume) XXX_Size() int { + return m.Size() +} +func (m *BuildVolume) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolume proto.InternalMessageInfo + +func (m *BuildVolumeMount) Reset() { *m = BuildVolumeMount{} } +func (*BuildVolumeMount) ProtoMessage() {} +func (*BuildVolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{24} +} +func (m *BuildVolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolumeMount.Merge(m, src) +} +func (m *BuildVolumeMount) XXX_Size() int { + return m.Size() +} +func (m *BuildVolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolumeMount proto.InternalMessageInfo + +func (m *BuildVolumeSource) Reset() { *m = BuildVolumeSource{} } +func (*BuildVolumeSource) ProtoMessage() {} +func 
(*BuildVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{25} +} +func (m *BuildVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolumeSource.Merge(m, src) +} +func (m *BuildVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *BuildVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolumeSource proto.InternalMessageInfo + +func (m *CommonSpec) Reset() { *m = CommonSpec{} } +func (*CommonSpec) ProtoMessage() {} +func (*CommonSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{26} +} +func (m *CommonSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonSpec.Merge(m, src) +} +func (m *CommonSpec) XXX_Size() int { + return m.Size() +} +func (m *CommonSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CommonSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonSpec proto.InternalMessageInfo + +func (m *CommonWebHookCause) Reset() { *m = CommonWebHookCause{} } +func (*CommonWebHookCause) ProtoMessage() {} +func (*CommonWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{27} +} +func (m *CommonWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonWebHookCause.Merge(m, src) +} +func (m *CommonWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *CommonWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_CommonWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonWebHookCause proto.InternalMessageInfo + +func (m *ConfigMapBuildSource) Reset() { *m = ConfigMapBuildSource{} } +func (*ConfigMapBuildSource) ProtoMessage() {} +func (*ConfigMapBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{28} +} +func (m *ConfigMapBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapBuildSource.Merge(m, src) +} +func (m *ConfigMapBuildSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapBuildSource proto.InternalMessageInfo + +func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} } +func (*CustomBuildStrategy) ProtoMessage() {} +func (*CustomBuildStrategy) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2ba579f6f004cb75, []int{29} +} +func (m *CustomBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomBuildStrategy.Merge(m, src) +} +func (m *CustomBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *CustomBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_CustomBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomBuildStrategy proto.InternalMessageInfo + +func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} } +func (*DockerBuildStrategy) ProtoMessage() {} +func (*DockerBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{30} +} +func (m *DockerBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerBuildStrategy.Merge(m, src) +} +func (m *DockerBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *DockerBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DockerBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerBuildStrategy proto.InternalMessageInfo + +func (m *DockerStrategyOptions) Reset() { *m = DockerStrategyOptions{} } +func (*DockerStrategyOptions) ProtoMessage() {} +func (*DockerStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{31} +} +func (m *DockerStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerStrategyOptions.Merge(m, src) +} +func (m *DockerStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *DockerStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DockerStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerStrategyOptions proto.InternalMessageInfo + +func (m *GenericWebHookCause) Reset() { *m = GenericWebHookCause{} } +func (*GenericWebHookCause) ProtoMessage() {} +func (*GenericWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{32} +} +func (m *GenericWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookCause.Merge(m, src) +} +func (m *GenericWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookCause proto.InternalMessageInfo + +func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} } +func (*GenericWebHookEvent) 
ProtoMessage() {} +func (*GenericWebHookEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{33} +} +func (m *GenericWebHookEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookEvent.Merge(m, src) +} +func (m *GenericWebHookEvent) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookEvent) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookEvent proto.InternalMessageInfo + +func (m *GitBuildSource) Reset() { *m = GitBuildSource{} } +func (*GitBuildSource) ProtoMessage() {} +func (*GitBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{34} +} +func (m *GitBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitBuildSource.Merge(m, src) +} +func (m *GitBuildSource) XXX_Size() int { + return m.Size() +} +func (m *GitBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GitBuildSource proto.InternalMessageInfo + +func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} } +func (*GitHubWebHookCause) ProtoMessage() {} +func (*GitHubWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{35} +} +func (m *GitHubWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitHubWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitHubWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitHubWebHookCause.Merge(m, src) +} +func (m *GitHubWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitHubWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitHubWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitHubWebHookCause proto.InternalMessageInfo + +func (m *GitInfo) Reset() { *m = GitInfo{} } +func (*GitInfo) ProtoMessage() {} +func (*GitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{36} +} +func (m *GitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitInfo.Merge(m, src) +} +func (m *GitInfo) XXX_Size() int { + return m.Size() +} +func (m *GitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitInfo proto.InternalMessageInfo + +func (m *GitLabWebHookCause) Reset() { *m = GitLabWebHookCause{} } +func (*GitLabWebHookCause) ProtoMessage() {} +func (*GitLabWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{37} +} +func (m *GitLabWebHookCause) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitLabWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitLabWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitLabWebHookCause.Merge(m, src) +} +func (m *GitLabWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitLabWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitLabWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitLabWebHookCause proto.InternalMessageInfo + +func (m *GitRefInfo) Reset() { *m = GitRefInfo{} } +func (*GitRefInfo) ProtoMessage() {} +func (*GitRefInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{38} +} +func (m *GitRefInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitRefInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRefInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRefInfo.Merge(m, src) +} +func (m *GitRefInfo) XXX_Size() int { + return m.Size() +} +func (m *GitRefInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitRefInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitRefInfo proto.InternalMessageInfo + +func (m *GitSourceRevision) Reset() { *m = GitSourceRevision{} } +func (*GitSourceRevision) ProtoMessage() {} +func (*GitSourceRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{39} +} +func (m *GitSourceRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitSourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitSourceRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitSourceRevision.Merge(m, src) +} +func (m *GitSourceRevision) XXX_Size() int { + return m.Size() +} +func (m *GitSourceRevision) XXX_DiscardUnknown() { + xxx_messageInfo_GitSourceRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_GitSourceRevision proto.InternalMessageInfo + +func (m *ImageChangeCause) Reset() { *m = ImageChangeCause{} } +func (*ImageChangeCause) ProtoMessage() {} +func (*ImageChangeCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{40} +} +func (m *ImageChangeCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeCause.Merge(m, src) +} +func (m *ImageChangeCause) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeCause) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeCause.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeCause proto.InternalMessageInfo + +func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigger{} } +func (*ImageChangeTrigger) ProtoMessage() {} +func (*ImageChangeTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{41} +} +func (m *ImageChangeTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeTrigger) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeTrigger.Merge(m, src) +} +func (m *ImageChangeTrigger) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeTrigger proto.InternalMessageInfo + +func (m *ImageChangeTriggerStatus) Reset() { *m = ImageChangeTriggerStatus{} } +func (*ImageChangeTriggerStatus) ProtoMessage() {} +func (*ImageChangeTriggerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{42} +} +func (m *ImageChangeTriggerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeTriggerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeTriggerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeTriggerStatus.Merge(m, src) +} +func (m *ImageChangeTriggerStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeTriggerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeTriggerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeTriggerStatus proto.InternalMessageInfo + +func (m *ImageLabel) Reset() { *m = ImageLabel{} } +func (*ImageLabel) ProtoMessage() {} +func (*ImageLabel) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{43} +} +func (m *ImageLabel) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLabel) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLabel.Merge(m, src) +} +func (m *ImageLabel) XXX_Size() int { + return m.Size() +} +func (m *ImageLabel) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLabel.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLabel proto.InternalMessageInfo + +func (m *ImageSource) Reset() { *m = ImageSource{} } +func (*ImageSource) ProtoMessage() {} +func (*ImageSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{44} +} +func (m *ImageSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSource.Merge(m, src) +} +func (m *ImageSource) XXX_Size() int { + return m.Size() +} +func (m *ImageSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSource proto.InternalMessageInfo + +func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} } +func (*ImageSourcePath) ProtoMessage() {} +func (*ImageSourcePath) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{45} +} +func (m *ImageSourcePath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSourcePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *ImageSourcePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSourcePath.Merge(m, src) +} +func (m *ImageSourcePath) XXX_Size() int { + return m.Size() +} +func (m *ImageSourcePath) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSourcePath.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSourcePath proto.InternalMessageInfo + +func (m *ImageStreamTagReference) Reset() { *m = ImageStreamTagReference{} } +func (*ImageStreamTagReference) ProtoMessage() {} +func (*ImageStreamTagReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{46} +} +func (m *ImageStreamTagReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTagReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTagReference.Merge(m, src) +} +func (m *ImageStreamTagReference) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTagReference) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTagReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTagReference proto.InternalMessageInfo + +func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} } +func (*JenkinsPipelineBuildStrategy) ProtoMessage() {} +func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{47} +} +func (m *JenkinsPipelineBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JenkinsPipelineBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JenkinsPipelineBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_JenkinsPipelineBuildStrategy.Merge(m, src) +} +func (m *JenkinsPipelineBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *JenkinsPipelineBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_JenkinsPipelineBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_JenkinsPipelineBuildStrategy proto.InternalMessageInfo + +func (m *OptionalNodeSelector) Reset() { *m = OptionalNodeSelector{} } +func (*OptionalNodeSelector) ProtoMessage() {} +func (*OptionalNodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{48} +} +func (m *OptionalNodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNodeSelector.Merge(m, src) +} +func (m *OptionalNodeSelector) XXX_Size() int { + return m.Size() +} +func (m *OptionalNodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNodeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNodeSelector proto.InternalMessageInfo + +func (m *ProxyConfig) Reset() { *m = ProxyConfig{} } +func (*ProxyConfig) ProtoMessage() {} +func (*ProxyConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{49} +} +func (m *ProxyConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProxyConfig) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProxyConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyConfig.Merge(m, src) +} +func (m *ProxyConfig) XXX_Size() int { + return m.Size() +} +func (m *ProxyConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyConfig proto.InternalMessageInfo + +func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} } +func (*SecretBuildSource) ProtoMessage() {} +func (*SecretBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{50} +} +func (m *SecretBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretBuildSource.Merge(m, src) +} +func (m *SecretBuildSource) XXX_Size() int { + return m.Size() +} +func (m *SecretBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretBuildSource proto.InternalMessageInfo + +func (m *SecretLocalReference) Reset() { *m = SecretLocalReference{} } +func (*SecretLocalReference) ProtoMessage() {} +func (*SecretLocalReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{51} +} +func (m *SecretLocalReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretLocalReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretLocalReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretLocalReference.Merge(m, src) +} +func (m *SecretLocalReference) XXX_Size() int { + return m.Size() +} +func (m *SecretLocalReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretLocalReference.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretLocalReference proto.InternalMessageInfo + +func (m *SecretSpec) Reset() { *m = SecretSpec{} } +func (*SecretSpec) ProtoMessage() {} +func (*SecretSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{52} +} +func (m *SecretSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretSpec.Merge(m, src) +} +func (m *SecretSpec) XXX_Size() int { + return m.Size() +} +func (m *SecretSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SecretSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretSpec proto.InternalMessageInfo + +func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} } +func (*SourceBuildStrategy) ProtoMessage() {} +func (*SourceBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{53} +} +func (m *SourceBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *SourceBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceBuildStrategy.Merge(m, src) +} +func (m *SourceBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *SourceBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_SourceBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceBuildStrategy proto.InternalMessageInfo + +func (m *SourceControlUser) Reset() { *m = SourceControlUser{} } +func (*SourceControlUser) ProtoMessage() {} +func (*SourceControlUser) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{54} +} +func (m *SourceControlUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceControlUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SourceControlUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceControlUser.Merge(m, src) +} +func (m *SourceControlUser) XXX_Size() int { + return m.Size() +} +func (m *SourceControlUser) XXX_DiscardUnknown() { + xxx_messageInfo_SourceControlUser.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceControlUser proto.InternalMessageInfo + +func (m *SourceRevision) Reset() { *m = SourceRevision{} } +func (*SourceRevision) ProtoMessage() {} +func (*SourceRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{55} +} +func (m *SourceRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SourceRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceRevision.Merge(m, src) +} +func (m *SourceRevision) XXX_Size() int { + return m.Size() +} +func (m *SourceRevision) XXX_DiscardUnknown() { + xxx_messageInfo_SourceRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceRevision proto.InternalMessageInfo + +func (m *SourceStrategyOptions) Reset() { *m = SourceStrategyOptions{} } +func (*SourceStrategyOptions) ProtoMessage() {} +func (*SourceStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{56} +} +func (m *SourceStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SourceStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceStrategyOptions.Merge(m, src) +} +func (m *SourceStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SourceStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SourceStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceStrategyOptions proto.InternalMessageInfo + +func (m *StageInfo) Reset() { *m = StageInfo{} } +func (*StageInfo) ProtoMessage() {} +func (*StageInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{57} +} +func (m *StageInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StageInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StageInfo) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_StageInfo.Merge(m, src) +} +func (m *StageInfo) XXX_Size() int { + return m.Size() +} +func (m *StageInfo) XXX_DiscardUnknown() { + xxx_messageInfo_StageInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_StageInfo proto.InternalMessageInfo + +func (m *StepInfo) Reset() { *m = StepInfo{} } +func (*StepInfo) ProtoMessage() {} +func (*StepInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{58} +} +func (m *StepInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StepInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StepInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_StepInfo.Merge(m, src) +} +func (m *StepInfo) XXX_Size() int { + return m.Size() +} +func (m *StepInfo) XXX_DiscardUnknown() { + xxx_messageInfo_StepInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_StepInfo proto.InternalMessageInfo + +func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} } +func (*WebHookTrigger) ProtoMessage() {} +func (*WebHookTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{59} +} +func (m *WebHookTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebHookTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebHookTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebHookTrigger.Merge(m, src) +} +func (m *WebHookTrigger) XXX_Size() int { + return m.Size() +} +func (m *WebHookTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_WebHookTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_WebHookTrigger proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BinaryBuildRequestOptions)(nil), "github.com.openshift.api.build.v1.BinaryBuildRequestOptions") + proto.RegisterType((*BinaryBuildSource)(nil), "github.com.openshift.api.build.v1.BinaryBuildSource") + proto.RegisterType((*BitbucketWebHookCause)(nil), "github.com.openshift.api.build.v1.BitbucketWebHookCause") + proto.RegisterType((*Build)(nil), "github.com.openshift.api.build.v1.Build") + proto.RegisterType((*BuildCondition)(nil), "github.com.openshift.api.build.v1.BuildCondition") + proto.RegisterType((*BuildConfig)(nil), "github.com.openshift.api.build.v1.BuildConfig") + proto.RegisterType((*BuildConfigList)(nil), "github.com.openshift.api.build.v1.BuildConfigList") + proto.RegisterType((*BuildConfigSpec)(nil), "github.com.openshift.api.build.v1.BuildConfigSpec") + proto.RegisterType((*BuildConfigStatus)(nil), "github.com.openshift.api.build.v1.BuildConfigStatus") + proto.RegisterType((*BuildList)(nil), "github.com.openshift.api.build.v1.BuildList") + proto.RegisterType((*BuildLog)(nil), "github.com.openshift.api.build.v1.BuildLog") + proto.RegisterType((*BuildLogOptions)(nil), "github.com.openshift.api.build.v1.BuildLogOptions") + proto.RegisterType((*BuildOutput)(nil), "github.com.openshift.api.build.v1.BuildOutput") + proto.RegisterType((*BuildPostCommitSpec)(nil), "github.com.openshift.api.build.v1.BuildPostCommitSpec") + proto.RegisterType((*BuildRequest)(nil), "github.com.openshift.api.build.v1.BuildRequest") + proto.RegisterType((*BuildSource)(nil), "github.com.openshift.api.build.v1.BuildSource") + proto.RegisterType((*BuildSpec)(nil), "github.com.openshift.api.build.v1.BuildSpec") 
+ proto.RegisterType((*BuildStatus)(nil), "github.com.openshift.api.build.v1.BuildStatus") + proto.RegisterType((*BuildStatusOutput)(nil), "github.com.openshift.api.build.v1.BuildStatusOutput") + proto.RegisterType((*BuildStatusOutputTo)(nil), "github.com.openshift.api.build.v1.BuildStatusOutputTo") + proto.RegisterType((*BuildStrategy)(nil), "github.com.openshift.api.build.v1.BuildStrategy") + proto.RegisterType((*BuildTriggerCause)(nil), "github.com.openshift.api.build.v1.BuildTriggerCause") + proto.RegisterType((*BuildTriggerPolicy)(nil), "github.com.openshift.api.build.v1.BuildTriggerPolicy") + proto.RegisterType((*BuildVolume)(nil), "github.com.openshift.api.build.v1.BuildVolume") + proto.RegisterType((*BuildVolumeMount)(nil), "github.com.openshift.api.build.v1.BuildVolumeMount") + proto.RegisterType((*BuildVolumeSource)(nil), "github.com.openshift.api.build.v1.BuildVolumeSource") + proto.RegisterType((*CommonSpec)(nil), "github.com.openshift.api.build.v1.CommonSpec") + proto.RegisterType((*CommonWebHookCause)(nil), "github.com.openshift.api.build.v1.CommonWebHookCause") + proto.RegisterType((*ConfigMapBuildSource)(nil), "github.com.openshift.api.build.v1.ConfigMapBuildSource") + proto.RegisterType((*CustomBuildStrategy)(nil), "github.com.openshift.api.build.v1.CustomBuildStrategy") + proto.RegisterType((*DockerBuildStrategy)(nil), "github.com.openshift.api.build.v1.DockerBuildStrategy") + proto.RegisterType((*DockerStrategyOptions)(nil), "github.com.openshift.api.build.v1.DockerStrategyOptions") + proto.RegisterType((*GenericWebHookCause)(nil), "github.com.openshift.api.build.v1.GenericWebHookCause") + proto.RegisterType((*GenericWebHookEvent)(nil), "github.com.openshift.api.build.v1.GenericWebHookEvent") + proto.RegisterType((*GitBuildSource)(nil), "github.com.openshift.api.build.v1.GitBuildSource") + proto.RegisterType((*GitHubWebHookCause)(nil), "github.com.openshift.api.build.v1.GitHubWebHookCause") + proto.RegisterType((*GitInfo)(nil), "github.com.openshift.api.build.v1.GitInfo") + proto.RegisterType((*GitLabWebHookCause)(nil), "github.com.openshift.api.build.v1.GitLabWebHookCause") + proto.RegisterType((*GitRefInfo)(nil), "github.com.openshift.api.build.v1.GitRefInfo") + proto.RegisterType((*GitSourceRevision)(nil), "github.com.openshift.api.build.v1.GitSourceRevision") + proto.RegisterType((*ImageChangeCause)(nil), "github.com.openshift.api.build.v1.ImageChangeCause") + proto.RegisterType((*ImageChangeTrigger)(nil), "github.com.openshift.api.build.v1.ImageChangeTrigger") + proto.RegisterType((*ImageChangeTriggerStatus)(nil), "github.com.openshift.api.build.v1.ImageChangeTriggerStatus") + proto.RegisterType((*ImageLabel)(nil), "github.com.openshift.api.build.v1.ImageLabel") + proto.RegisterType((*ImageSource)(nil), "github.com.openshift.api.build.v1.ImageSource") + proto.RegisterType((*ImageSourcePath)(nil), "github.com.openshift.api.build.v1.ImageSourcePath") + proto.RegisterType((*ImageStreamTagReference)(nil), "github.com.openshift.api.build.v1.ImageStreamTagReference") + proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "github.com.openshift.api.build.v1.JenkinsPipelineBuildStrategy") + proto.RegisterType((*OptionalNodeSelector)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector.ItemsEntry") + proto.RegisterType((*ProxyConfig)(nil), "github.com.openshift.api.build.v1.ProxyConfig") + proto.RegisterType((*SecretBuildSource)(nil), 
"github.com.openshift.api.build.v1.SecretBuildSource") + proto.RegisterType((*SecretLocalReference)(nil), "github.com.openshift.api.build.v1.SecretLocalReference") + proto.RegisterType((*SecretSpec)(nil), "github.com.openshift.api.build.v1.SecretSpec") + proto.RegisterType((*SourceBuildStrategy)(nil), "github.com.openshift.api.build.v1.SourceBuildStrategy") + proto.RegisterType((*SourceControlUser)(nil), "github.com.openshift.api.build.v1.SourceControlUser") + proto.RegisterType((*SourceRevision)(nil), "github.com.openshift.api.build.v1.SourceRevision") + proto.RegisterType((*SourceStrategyOptions)(nil), "github.com.openshift.api.build.v1.SourceStrategyOptions") + proto.RegisterType((*StageInfo)(nil), "github.com.openshift.api.build.v1.StageInfo") + proto.RegisterType((*StepInfo)(nil), "github.com.openshift.api.build.v1.StepInfo") + proto.RegisterType((*WebHookTrigger)(nil), "github.com.openshift.api.build.v1.WebHookTrigger") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/build/v1/generated.proto", fileDescriptor_2ba579f6f004cb75) +} + +var fileDescriptor_2ba579f6f004cb75 = []byte{ + // 4386 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5c, 0x4d, 0x6c, 0x1c, 0x47, + 0x76, 0x56, 0xcf, 0x0f, 0x67, 0xe6, 0x0d, 0x45, 0x52, 0x45, 0xc9, 0x1a, 0x69, 0xb5, 0x1c, 0xb9, + 0x1d, 0x1b, 0x76, 0x6c, 0x0f, 0x57, 0xb2, 0xa4, 0xc8, 0x36, 0xe2, 0x80, 0x43, 0x52, 0x32, 0xb5, + 0x23, 0x89, 0xa8, 0xa1, 0x65, 0xef, 0x5a, 0xd8, 0xa4, 0xd9, 0x53, 0x33, 0x6c, 0x73, 0xa6, 0x7b, + 0xdc, 0xd5, 0x43, 0x9b, 0x0b, 0x04, 0x58, 0x04, 0x58, 0x24, 0xeb, 0xbd, 0x64, 0x2f, 0x8b, 0x24, + 0x97, 0x24, 0x58, 0xe4, 0x94, 0x53, 0x02, 0x04, 0xd8, 0x60, 0x2f, 0x01, 0xb2, 0x07, 0x1f, 0x12, + 0x60, 0x83, 0x04, 0x88, 0x81, 0x5d, 0x0c, 0x62, 0xe6, 0x10, 0x20, 0x87, 0x00, 0xb9, 0xea, 0x10, + 0x04, 0xf5, 0xd3, 0xdd, 0x55, 0x3d, 0x3d, 0x54, 0x0f, 0x25, 0x3b, 0x9b, 0xe4, 0xc6, 0xa9, 0xf7, + 0xde, 0xf7, 0xea, 0xe7, 0xd5, 0xab, 0xf7, 0x5e, 0x55, 0x13, 0xae, 0xf4, 0x9c, 0x60, 0x6f, 0xb4, + 0xdb, 0xb0, 0xbd, 0xc1, 0xaa, 0x37, 0x24, 0x2e, 0xdd, 0x73, 0xba, 0xc1, 0xaa, 0x35, 0x74, 0x56, + 0x77, 0x47, 0x4e, 0xbf, 0xb3, 0x7a, 0x70, 0x65, 0xb5, 0x47, 0x5c, 0xe2, 0x5b, 0x01, 0xe9, 0x34, + 0x86, 0xbe, 0x17, 0x78, 0xe8, 0xd9, 0x58, 0xa4, 0x11, 0x89, 0x34, 0xac, 0xa1, 0xd3, 0xe0, 0x22, + 0x8d, 0x83, 0x2b, 0x17, 0x5f, 0x55, 0x50, 0x7b, 0x5e, 0xcf, 0x5b, 0xe5, 0x92, 0xbb, 0xa3, 0x2e, + 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x04, 0xe2, 0x45, 0x73, 0xff, 0x26, 0x6d, 0x38, 0x1e, 0x57, 0x6b, + 0x7b, 0x3e, 0x49, 0xd1, 0x7a, 0xf1, 0x5a, 0xcc, 0x33, 0xb0, 0xec, 0x3d, 0xc7, 0x25, 0xfe, 0xe1, + 0xea, 0x70, 0xbf, 0xc7, 0x1a, 0xe8, 0xea, 0x80, 0x04, 0x56, 0x9a, 0xd4, 0x8d, 0x69, 0x52, 0xfe, + 0xc8, 0x0d, 0x9c, 0x01, 0x59, 0xa5, 0xf6, 0x1e, 0x19, 0x58, 0x49, 0x39, 0xf3, 0x6f, 0x0a, 0x70, + 0xa1, 0xe9, 0xb8, 0x96, 0x7f, 0xd8, 0x64, 0x63, 0xc2, 0xe4, 0xc3, 0x11, 0xa1, 0xc1, 0xfd, 0x61, + 0xe0, 0x78, 0x2e, 0x45, 0xbf, 0x05, 0x65, 0xa6, 0xb0, 0x63, 0x05, 0x56, 0xcd, 0xb8, 0x6c, 0xbc, + 0x58, 0xbd, 0xfa, 0xb5, 0x86, 0x50, 0xd4, 0x50, 0x15, 0x35, 0x86, 0xfb, 0x3d, 0xd6, 0x40, 0x1b, + 0x8c, 0xbb, 0x71, 0x70, 0xa5, 0x71, 0x7f, 0xf7, 0x03, 0x62, 0x07, 0x77, 0x49, 0x60, 0x35, 0xd1, + 0xa7, 0xe3, 0xfa, 0xa9, 0xa3, 0x71, 0x1d, 0xe2, 0x36, 0x1c, 0xa1, 0xa2, 0x17, 0x60, 0xce, 0xa2, + 0xb7, 0x9c, 0x3e, 0xa9, 0xe5, 0x2e, 0x1b, 0x2f, 0x56, 0x9a, 0x0b, 0x92, 0x7b, 0x6e, 0x8d, 0xb7, + 0x62, 0x49, 0x45, 0x37, 0x60, 0xc1, 0x27, 0x07, 0x0e, 0x75, 0x3c, 0x77, 0xdd, 0x1b, 0x0c, 0x9c, + 0xa0, 0x96, 0xd7, 0xf9, 0x45, 
0x2b, 0x4e, 0x70, 0xa1, 0xd7, 0x61, 0x31, 0x6c, 0xb9, 0x4b, 0x28, + 0xb5, 0x7a, 0xa4, 0x56, 0xe0, 0x82, 0x8b, 0x52, 0xb0, 0x24, 0x9b, 0x71, 0x92, 0x0f, 0x35, 0x01, + 0x85, 0x4d, 0x6b, 0xa3, 0x60, 0xcf, 0xf3, 0xef, 0x59, 0x03, 0x52, 0x2b, 0x72, 0xe9, 0x68, 0x50, + 0x31, 0x05, 0xa7, 0x70, 0xa3, 0x4d, 0x58, 0xd6, 0x5b, 0x37, 0x07, 0x96, 0xd3, 0xaf, 0xcd, 0x71, + 0x90, 0x65, 0x09, 0x52, 0x55, 0x48, 0x38, 0x8d, 0x1f, 0x7d, 0x1d, 0xce, 0xe9, 0xe3, 0x0a, 0x88, + 0xe8, 0x4d, 0x89, 0x03, 0x9d, 0x93, 0x40, 0xa7, 0x35, 0x22, 0x4e, 0x97, 0x41, 0xf7, 0xe0, 0x99, + 0x09, 0x82, 0xe8, 0x56, 0x99, 0xa3, 0x3d, 0x23, 0xd1, 0x16, 0x74, 0x2a, 0x9e, 0x22, 0x65, 0xbe, + 0x09, 0x67, 0x14, 0x0b, 0x6a, 0x7b, 0x23, 0xdf, 0x26, 0xca, 0xba, 0x1a, 0xc7, 0xad, 0xab, 0xf9, + 0x89, 0x01, 0xe7, 0x9a, 0x4e, 0xb0, 0x3b, 0xb2, 0xf7, 0x49, 0xf0, 0x2e, 0xd9, 0x7d, 0xdb, 0xf3, + 0xf6, 0xd7, 0xad, 0x11, 0x25, 0xe8, 0x43, 0x00, 0xdb, 0x1b, 0x0c, 0x3c, 0xb7, 0x3d, 0x24, 0xb6, + 0xb4, 0xbe, 0xeb, 0x8d, 0xc7, 0x6e, 0xc9, 0xc6, 0x3a, 0x17, 0x52, 0xa1, 0x9a, 0x17, 0xa5, 0x72, + 0x34, 0x49, 0xc3, 0x8a, 0x12, 0xf3, 0x07, 0x39, 0x28, 0xf2, 0x41, 0x7c, 0x09, 0x86, 0x7f, 0x0f, + 0x0a, 0x94, 0x0d, 0x2c, 0xc7, 0xd1, 0x5f, 0xc9, 0x30, 0x30, 0x31, 0xbd, 0x43, 0x62, 0x37, 0xe7, + 0x25, 0x72, 0x81, 0xfd, 0xc2, 0x1c, 0x07, 0x3d, 0x80, 0x39, 0x1a, 0x58, 0xc1, 0x88, 0xf2, 0x8d, + 0x51, 0xbd, 0xda, 0xc8, 0x8c, 0xc8, 0xa5, 0xe2, 0x05, 0x12, 0xbf, 0xb1, 0x44, 0x33, 0xff, 0x3e, + 0x0f, 0x0b, 0x9c, 0x6f, 0xdd, 0x73, 0x3b, 0x0e, 0x73, 0x0b, 0xe8, 0x06, 0x14, 0x82, 0xc3, 0x61, + 0xb8, 0xb2, 0x66, 0xd8, 0x99, 0x9d, 0xc3, 0x21, 0x79, 0x34, 0xae, 0x23, 0x9d, 0x9b, 0xb5, 0x62, + 0xce, 0x8f, 0x5a, 0x51, 0x17, 0xc5, 0x5e, 0xbf, 0xa6, 0xab, 0x7c, 0x34, 0xae, 0xa7, 0xf8, 0xc7, + 0x46, 0x84, 0xa4, 0x77, 0x0c, 0x7d, 0x00, 0x0b, 0x7d, 0x8b, 0x06, 0xef, 0x0c, 0x3b, 0x56, 0x40, + 0x76, 0x9c, 0x01, 0xe1, 0xbb, 0xaa, 0x7a, 0xf5, 0x57, 0xb3, 0x2d, 0x14, 0x93, 0x88, 0x4d, 0xbd, + 0xa5, 0x21, 0xe1, 0x04, 0x32, 0x3a, 0x00, 0xc4, 0x5a, 0x76, 0x7c, 0xcb, 0xa5, 0x62, 0x54, 0x4c, + 0x5f, 0x7e, 0x66, 0x7d, 0x91, 0x21, 0xb6, 0x26, 0xd0, 0x70, 0x8a, 0x06, 0xb6, 0x8b, 0x7c, 0x62, + 0x51, 0xcf, 0x95, 0x4e, 0x2b, 0x5a, 0x24, 0xcc, 0x5b, 0xb1, 0xa4, 0xa2, 0x97, 0xa0, 0x34, 0x90, + 0xde, 0xad, 0x98, 0xee, 0xdd, 0x42, 0xba, 0xf9, 0xa3, 0x1c, 0x54, 0xc3, 0x15, 0xea, 0x3a, 0xbd, + 0x2f, 0xc1, 0xd2, 0x77, 0x34, 0x4b, 0xbf, 0x9a, 0xd5, 0x2e, 0x45, 0xff, 0xa6, 0xda, 0xfb, 0xc3, + 0x84, 0xbd, 0x5f, 0x9b, 0x11, 0xf7, 0x78, 0xab, 0xff, 0xa9, 0x01, 0x8b, 0x0a, 0x77, 0xcb, 0xa1, + 0x01, 0x7a, 0x38, 0x31, 0x53, 0x8d, 0x6c, 0x33, 0xc5, 0xa4, 0xf9, 0x3c, 0x2d, 0x49, 0x6d, 0xe5, + 0xb0, 0x45, 0x99, 0xa5, 0x36, 0x14, 0x9d, 0x80, 0x0c, 0xd8, 0xde, 0xc8, 0xcf, 0xb2, 0x7d, 0x45, + 0x07, 0x9b, 0xa7, 0x25, 0x74, 0x71, 0x8b, 0x81, 0x60, 0x81, 0x65, 0xfe, 0x22, 0xaf, 0x0d, 0x83, + 0x4d, 0x1f, 0xb2, 0xa1, 0x1c, 0xf8, 0x4e, 0xaf, 0x47, 0x7c, 0x5a, 0x33, 0xb8, 0xae, 0xeb, 0x59, + 0x75, 0xed, 0x08, 0xb9, 0x6d, 0xaf, 0xef, 0xd8, 0x87, 0xf1, 0x68, 0x64, 0x33, 0xc5, 0x11, 0x30, + 0x5a, 0x83, 0x8a, 0x3f, 0x72, 0x05, 0xa3, 0xdc, 0xed, 0xcf, 0x49, 0xf6, 0x0a, 0x0e, 0x09, 0x8f, + 0xc6, 0x75, 0xe1, 0x5a, 0xa2, 0x16, 0x1c, 0x4b, 0x21, 0x4b, 0xf3, 0xff, 0x62, 0x91, 0x5f, 0xcd, + 0xec, 0xff, 0xb9, 0xdd, 0x44, 0x76, 0x19, 0xb7, 0xa9, 0xfe, 0x1e, 0x75, 0xe0, 0x12, 0x1d, 0xd9, + 0x36, 0xa1, 0xb4, 0x3b, 0xea, 0xf3, 0x9e, 0xd0, 0xb7, 0x1d, 0x1a, 0x78, 0xfe, 0x61, 0xcb, 0x61, + 0x21, 0x06, 0xdb, 0x74, 0xc5, 0xe6, 0xe5, 0xa3, 0x71, 0xfd, 0x52, 0xfb, 0x18, 0x3e, 0x7c, 0x2c, + 0x0a, 0x7a, 0x0f, 0x6a, 0x5d, 0xcb, 0xe9, 0x93, 0x4e, 
0x8a, 0x86, 0x22, 0xd7, 0x70, 0xe9, 0x68, + 0x5c, 0xaf, 0xdd, 0x9a, 0xc2, 0x83, 0xa7, 0x4a, 0x9b, 0xff, 0x6c, 0xc0, 0x99, 0x09, 0x9b, 0x46, + 0xd7, 0xa1, 0xca, 0x5c, 0xc9, 0x03, 0xe2, 0xb3, 0xc3, 0x9a, 0x9b, 0x6a, 0x3e, 0x8e, 0x35, 0x5a, + 0x31, 0x09, 0xab, 0x7c, 0xe8, 0x13, 0x03, 0x96, 0x9d, 0x81, 0xd5, 0x23, 0xeb, 0x7b, 0x96, 0xdb, + 0x23, 0xe1, 0xa2, 0x4a, 0x7b, 0x7c, 0x33, 0xc3, 0xcc, 0x6f, 0x4d, 0x48, 0xcb, 0x5d, 0xf6, 0x15, + 0xa9, 0x7c, 0x79, 0x92, 0x83, 0xe2, 0x34, 0xa5, 0xe6, 0x8f, 0x0d, 0xa8, 0xf0, 0x91, 0x7d, 0x09, + 0x3b, 0xef, 0xae, 0xbe, 0xf3, 0x5e, 0xcc, 0xba, 0x1b, 0xa6, 0xec, 0x39, 0x80, 0xb2, 0xe8, 0xb9, + 0xd7, 0x33, 0xff, 0xb3, 0x20, 0xf7, 0x5f, 0xcb, 0xeb, 0x85, 0x31, 0xf5, 0x2a, 0x54, 0x6c, 0xcf, + 0x0d, 0x2c, 0xd6, 0x65, 0x79, 0x84, 0x9e, 0x09, 0xb7, 0xc6, 0x7a, 0x48, 0xc0, 0x31, 0x0f, 0x3b, + 0x04, 0xba, 0x5e, 0xbf, 0xef, 0x7d, 0xc4, 0x37, 0x52, 0x39, 0xf6, 0x59, 0xb7, 0x78, 0x2b, 0x96, + 0x54, 0xf4, 0x0a, 0x94, 0x87, 0x2c, 0x44, 0xf3, 0xa4, 0x4f, 0x2c, 0xc7, 0xa3, 0xde, 0x96, 0xed, + 0x38, 0xe2, 0x40, 0xd7, 0x60, 0x9e, 0x3a, 0xae, 0x4d, 0xda, 0xc4, 0xf6, 0xdc, 0x0e, 0xe5, 0xb6, + 0x9e, 0x6f, 0x2e, 0x1d, 0x8d, 0xeb, 0xf3, 0x6d, 0xa5, 0x1d, 0x6b, 0x5c, 0xe8, 0x5d, 0xa8, 0xf0, + 0xdf, 0xfc, 0xfc, 0x2b, 0xce, 0x7c, 0xfe, 0x9d, 0x66, 0x83, 0x6c, 0x87, 0x00, 0x38, 0xc6, 0x42, + 0x57, 0x01, 0x58, 0x9a, 0x42, 0x03, 0x6b, 0x30, 0xa4, 0xfc, 0x24, 0x2f, 0xc7, 0xdb, 0x77, 0x27, + 0xa2, 0x60, 0x85, 0x0b, 0xbd, 0x0c, 0x95, 0xc0, 0x72, 0xfa, 0x2d, 0xc7, 0x25, 0x94, 0x47, 0xc2, + 0x79, 0xa1, 0x60, 0x27, 0x6c, 0xc4, 0x31, 0x1d, 0x35, 0x00, 0xfa, 0x6c, 0xd3, 0x34, 0x0f, 0x03, + 0x42, 0x79, 0xa4, 0x9b, 0x6f, 0x2e, 0x30, 0xf0, 0x56, 0xd4, 0x8a, 0x15, 0x0e, 0x36, 0xeb, 0xae, + 0xf7, 0x91, 0xe5, 0x04, 0xb5, 0x8a, 0x3e, 0xeb, 0xf7, 0xbc, 0x77, 0x2d, 0x27, 0xc0, 0x92, 0x8a, + 0x9e, 0x87, 0xd2, 0x81, 0xdc, 0x69, 0xc0, 0x41, 0xab, 0xec, 0xd8, 0x0d, 0x77, 0x58, 0x48, 0x43, + 0x7b, 0x70, 0xc9, 0x71, 0x29, 0xb1, 0x47, 0x3e, 0x69, 0xef, 0x3b, 0xc3, 0x9d, 0x56, 0xfb, 0x01, + 0xf1, 0x9d, 0xee, 0x61, 0xd3, 0xb2, 0xf7, 0x89, 0xdb, 0xa9, 0x55, 0xb9, 0x92, 0x5f, 0x91, 0x4a, + 0x2e, 0x6d, 0x1d, 0xc3, 0x8b, 0x8f, 0x45, 0x32, 0x3f, 0x09, 0x0f, 0xf8, 0xfb, 0xa3, 0x60, 0x38, + 0x0a, 0xd0, 0x9b, 0x90, 0x0b, 0x3c, 0xb9, 0x6d, 0x9e, 0x53, 0xd6, 0xaa, 0xc1, 0x02, 0xac, 0xf8, + 0x20, 0xc7, 0xa4, 0x4b, 0x7c, 0xe2, 0xda, 0xa4, 0x39, 0x77, 0x34, 0xae, 0xe7, 0x76, 0x3c, 0x9c, + 0x0b, 0x3c, 0xf4, 0x1e, 0xc0, 0x70, 0x44, 0xf7, 0xda, 0xc4, 0xf6, 0x49, 0x20, 0x4f, 0xf0, 0x17, + 0xd3, 0x40, 0x5a, 0x9e, 0x6d, 0xf5, 0x93, 0x48, 0x7c, 0x7e, 0xb7, 0x23, 0x79, 0xac, 0x60, 0xa1, + 0x0e, 0x54, 0xf9, 0xc6, 0x6f, 0x59, 0xbb, 0xa4, 0xcf, 0x0c, 0x36, 0x9f, 0xd1, 0xbf, 0x6f, 0x45, + 0x52, 0xb1, 0x53, 0x8b, 0xdb, 0x28, 0x56, 0x61, 0xcd, 0xdf, 0x31, 0x60, 0x99, 0x4f, 0xc6, 0xb6, + 0x47, 0x03, 0x91, 0xb7, 0x70, 0xcf, 0xff, 0x3c, 0x94, 0xd8, 0x39, 0x60, 0xb9, 0x1d, 0x7e, 0x06, + 0x56, 0xc4, 0xaa, 0xad, 0x8b, 0x26, 0x1c, 0xd2, 0xd0, 0x25, 0x28, 0x58, 0x7e, 0x4f, 0x78, 0x86, + 0x4a, 0xb3, 0xcc, 0x42, 0x90, 0x35, 0xbf, 0x47, 0x31, 0x6f, 0x65, 0x26, 0x42, 0x6d, 0xdf, 0x19, + 0x4e, 0xe4, 0xa2, 0x6d, 0xde, 0x8a, 0x25, 0xd5, 0xfc, 0x69, 0x09, 0xe6, 0xd5, 0xec, 0xfa, 0x4b, + 0x88, 0xb9, 0xde, 0x87, 0x72, 0x98, 0xad, 0xc9, 0x55, 0xbb, 0x92, 0x61, 0x6a, 0x45, 0xee, 0x86, + 0xa5, 0x60, 0x73, 0x9e, 0xb9, 0x8e, 0xf0, 0x17, 0x8e, 0x00, 0x11, 0x81, 0x25, 0x79, 0xd0, 0x93, + 0x4e, 0xf3, 0x90, 0xcf, 0xbd, 0x3c, 0x9f, 0x33, 0xd9, 0xd7, 0xd9, 0xa3, 0x71, 0x7d, 0x69, 0x27, + 0x01, 0x80, 0x27, 0x20, 0xd1, 0x1a, 0x14, 0xba, 0xbe, 0x37, 0xe0, 0x9e, 0x29, 
0x23, 0x34, 0x5f, + 0xa1, 0x5b, 0xbe, 0x37, 0xc0, 0x5c, 0x14, 0xbd, 0x07, 0x73, 0xbb, 0x3c, 0x35, 0x95, 0xbe, 0x2a, + 0x53, 0x90, 0x98, 0xcc, 0x65, 0x9b, 0xc0, 0xd6, 0x54, 0x34, 0x63, 0x89, 0x87, 0xae, 0xe8, 0x87, + 0xec, 0x1c, 0xdf, 0xfa, 0x8b, 0xc7, 0x1e, 0xb0, 0xaf, 0x43, 0x9e, 0xb8, 0x07, 0xb5, 0x12, 0xb7, + 0xf4, 0x8b, 0x69, 0xc3, 0xd9, 0x74, 0x0f, 0x1e, 0x58, 0x7e, 0xb3, 0x2a, 0x97, 0x36, 0xbf, 0xe9, + 0x1e, 0x60, 0x26, 0x83, 0xf6, 0xa1, 0xaa, 0x4c, 0x4f, 0xad, 0xcc, 0x21, 0xae, 0xcd, 0x18, 0xb6, + 0x89, 0x5c, 0x38, 0xda, 0x33, 0xca, 0x0a, 0x60, 0x15, 0x1d, 0x7d, 0xcf, 0x80, 0x73, 0x1d, 0xcf, + 0xde, 0x67, 0xc7, 0xb7, 0x6f, 0x05, 0xa4, 0x77, 0x28, 0x8f, 0x2e, 0xee, 0x09, 0xab, 0x57, 0x6f, + 0x66, 0xd0, 0xbb, 0x91, 0x26, 0xdf, 0xbc, 0x70, 0x34, 0xae, 0x9f, 0x4b, 0x25, 0xe1, 0x74, 0x8d, + 0xbc, 0x2f, 0x94, 0xaf, 0x42, 0xb2, 0x2f, 0x90, 0xb9, 0x2f, 0xed, 0x34, 0x79, 0xd1, 0x97, 0x54, + 0x12, 0x4e, 0xd7, 0x68, 0xfe, 0x53, 0x51, 0x3a, 0x56, 0x59, 0xe2, 0x78, 0x4d, 0x4b, 0x83, 0xeb, + 0x89, 0x34, 0x78, 0x51, 0x61, 0x55, 0x72, 0xe0, 0xd8, 0x22, 0x73, 0x4f, 0xd9, 0x22, 0x1b, 0x00, + 0x62, 0x0e, 0xbb, 0x4e, 0x9f, 0x84, 0x1e, 0x89, 0x39, 0x88, 0x8d, 0xa8, 0x15, 0x2b, 0x1c, 0xa8, + 0x05, 0xf9, 0x9e, 0x8c, 0x71, 0xb3, 0x79, 0x87, 0xdb, 0x4e, 0xa0, 0xf6, 0xa1, 0xc4, 0x2c, 0xf4, + 0xb6, 0x13, 0x60, 0x06, 0x83, 0x1e, 0xc0, 0x1c, 0xf7, 0xbb, 0xb4, 0x56, 0xcc, 0x9c, 0xbf, 0xf0, + 0x6d, 0x2e, 0xd1, 0x22, 0xdf, 0xc9, 0x1b, 0x29, 0x96, 0x68, 0x2c, 0x2e, 0x60, 0x91, 0x10, 0xf9, + 0x38, 0xd8, 0x70, 0x7c, 0x59, 0x37, 0x53, 0xc2, 0xfa, 0x90, 0x82, 0x15, 0x2e, 0xf4, 0x2d, 0x98, + 0x97, 0x2b, 0x28, 0x8e, 0xad, 0xd2, 0x8c, 0xc7, 0x96, 0x08, 0x82, 0x14, 0x04, 0xac, 0xe1, 0xa1, + 0xdf, 0x84, 0x12, 0xe5, 0x7f, 0xd1, 0x19, 0x76, 0xa2, 0x90, 0x55, 0x27, 0x30, 0xca, 0xd1, 0x05, + 0x89, 0xe2, 0x10, 0x15, 0xed, 0xf3, 0x41, 0x77, 0x9d, 0xde, 0x5d, 0x6b, 0xc8, 0x76, 0x1d, 0xd3, + 0xf1, 0x6b, 0x99, 0x52, 0x1f, 0x29, 0xa4, 0xaa, 0x51, 0x67, 0x4b, 0x42, 0x62, 0x05, 0xde, 0xfc, + 0x79, 0x18, 0x6a, 0xf3, 0x83, 0xd1, 0x4a, 0xa9, 0xba, 0x3d, 0xe5, 0xac, 0x2b, 0xe1, 0xcc, 0x72, + 0x5f, 0xa4, 0x33, 0x33, 0xff, 0xa3, 0x14, 0x6e, 0x5a, 0x91, 0x1c, 0x5d, 0x81, 0xe2, 0x70, 0xcf, + 0xa2, 0xe1, 0xae, 0x0d, 0x33, 0x93, 0xe2, 0x36, 0x6b, 0x7c, 0x34, 0xae, 0x83, 0x88, 0x16, 0xd8, + 0x2f, 0x2c, 0x38, 0x79, 0xc0, 0x6e, 0xb9, 0x36, 0xe9, 0xf7, 0x49, 0x47, 0x86, 0xe0, 0x71, 0xc0, + 0x1e, 0x12, 0x70, 0xcc, 0x83, 0x6e, 0x44, 0x55, 0x1b, 0xb1, 0x0b, 0x57, 0xf4, 0xaa, 0xcd, 0x23, + 0x66, 0x5d, 0xa2, 0xdc, 0x30, 0xb5, 0x8a, 0x53, 0x38, 0xbe, 0x8a, 0x83, 0xba, 0xb0, 0x40, 0x03, + 0xcb, 0x0f, 0xa2, 0xc8, 0xf8, 0x04, 0xc1, 0x38, 0x3a, 0x1a, 0xd7, 0x17, 0xda, 0x1a, 0x0a, 0x4e, + 0xa0, 0xa2, 0x11, 0x2c, 0xdb, 0xde, 0x60, 0xd8, 0x27, 0x61, 0x49, 0x4a, 0x28, 0x9b, 0xbd, 0xd2, + 0x76, 0x9e, 0xa5, 0x7f, 0xeb, 0x93, 0x50, 0x38, 0x0d, 0x1f, 0xfd, 0x3a, 0x94, 0x3b, 0x23, 0xdf, + 0x62, 0x8d, 0x32, 0xb0, 0x7f, 0x36, 0x4c, 0x65, 0x36, 0x64, 0xfb, 0xa3, 0x71, 0xfd, 0x34, 0xcb, + 0x05, 0x1a, 0x61, 0x03, 0x8e, 0x44, 0xd0, 0x2e, 0x5c, 0xf4, 0x78, 0xf0, 0x2b, 0x5c, 0x9f, 0x08, + 0x30, 0xc2, 0xed, 0x2d, 0xab, 0xdc, 0x61, 0xd9, 0xf2, 0xe2, 0xfd, 0xa9, 0x9c, 0xf8, 0x18, 0x14, + 0x74, 0x1b, 0xe6, 0xc4, 0x26, 0x92, 0xa7, 0x62, 0xa6, 0xf8, 0x04, 0xc4, 0x4d, 0x05, 0x13, 0xc3, + 0x52, 0x1c, 0x3d, 0x84, 0x39, 0xa1, 0x46, 0x1e, 0x69, 0xd7, 0x66, 0x2b, 0xdc, 0x8a, 0xee, 0xc7, + 0xfe, 0x53, 0xfc, 0xc6, 0x12, 0x13, 0xed, 0xf0, 0x32, 0x19, 0xf3, 0xcb, 0x55, 0xbe, 0xcf, 0xb2, + 0x14, 0x9a, 0xdb, 0x4c, 0x60, 0xcb, 0xed, 0x7a, 0x5a, 0x79, 0x8c, 0x7b, 0x65, 0x81, 0xc5, 0xbc, + 0x72, 
0xdf, 0xeb, 0xb5, 0x5d, 0x67, 0x38, 0x24, 0x41, 0x6d, 0x5e, 0xf7, 0xca, 0xad, 0x88, 0x82, + 0x15, 0x2e, 0x44, 0xb8, 0x53, 0x13, 0xa5, 0x5c, 0x5a, 0x3b, 0xcd, 0x7b, 0x73, 0x65, 0x86, 0x2a, + 0x97, 0x90, 0xd4, 0xdc, 0x99, 0x04, 0xc3, 0x0a, 0xb0, 0x69, 0xcb, 0x92, 0x88, 0x3a, 0x3b, 0xe8, + 0x9e, 0x92, 0x03, 0xdd, 0x38, 0xc9, 0xfc, 0xee, 0x78, 0x6a, 0x5a, 0x64, 0xb6, 0x64, 0x56, 0xa1, + 0xb3, 0xa0, 0xeb, 0x32, 0xa7, 0xd9, 0x70, 0x7a, 0x84, 0x06, 0xd2, 0xc5, 0xe8, 0x49, 0x8a, 0x20, + 0x61, 0x95, 0xcf, 0xfc, 0x49, 0x01, 0x4e, 0x4b, 0x38, 0x11, 0x71, 0xa0, 0xeb, 0x5a, 0x68, 0xf1, + 0x6c, 0x22, 0xb4, 0x38, 0xa3, 0x31, 0x2b, 0xc1, 0x85, 0x0f, 0x0b, 0x7a, 0x18, 0x25, 0x83, 0x8c, + 0x1b, 0x99, 0x23, 0x36, 0x0d, 0x59, 0x78, 0x08, 0x3d, 0x5e, 0xc3, 0x09, 0x0d, 0x4c, 0xa7, 0x1e, + 0x2e, 0xc9, 0x54, 0xe0, 0x46, 0xe6, 0xc8, 0x2c, 0x45, 0xa7, 0x1e, 0x97, 0xe1, 0x84, 0x06, 0xa6, + 0xd3, 0x1e, 0xd1, 0xc0, 0x1b, 0x44, 0x3a, 0x0b, 0x99, 0x75, 0xae, 0x73, 0xc1, 0x14, 0x9d, 0xeb, + 0x1a, 0x22, 0x4e, 0x68, 0x40, 0x3f, 0x34, 0xe0, 0xfc, 0x07, 0xc4, 0xdd, 0x77, 0x5c, 0xba, 0xed, + 0x0c, 0x49, 0xdf, 0x71, 0xe3, 0x11, 0x0b, 0xdf, 0xfb, 0x1b, 0x19, 0xb4, 0xdf, 0xd1, 0x11, 0xf4, + 0x6e, 0x7c, 0xe5, 0x68, 0x5c, 0x3f, 0x7f, 0x27, 0x5d, 0x07, 0x9e, 0xa6, 0xdc, 0xfc, 0x6e, 0x51, + 0x5a, 0xbc, 0x7a, 0x32, 0xaa, 0x67, 0x89, 0xf1, 0x98, 0xb3, 0xc4, 0x87, 0x05, 0x7e, 0x2b, 0xec, + 0xd8, 0xf2, 0x62, 0x6c, 0x06, 0xab, 0xb9, 0xad, 0x09, 0x8a, 0x43, 0x99, 0xcf, 0xa6, 0x4e, 0xc0, + 0x09, 0x0d, 0xc8, 0x85, 0xd3, 0x02, 0x3c, 0x54, 0x99, 0xcf, 0x7c, 0xbf, 0x77, 0xdb, 0x09, 0xde, + 0x8e, 0xe4, 0x84, 0xc6, 0x33, 0x47, 0xe3, 0xfa, 0x69, 0xad, 0x1d, 0xeb, 0xf0, 0x68, 0x04, 0x4b, + 0x4a, 0x99, 0x91, 0x4f, 0x97, 0xb4, 0x99, 0xd7, 0x66, 0x2b, 0x6c, 0x0a, 0x85, 0x3c, 0x85, 0xdd, + 0x4a, 0x00, 0xe2, 0x09, 0x15, 0x72, 0x98, 0x7d, 0x2b, 0x1a, 0x66, 0x71, 0x96, 0x61, 0xb6, 0xac, + 0xf4, 0x61, 0xc6, 0xed, 0x58, 0x87, 0x47, 0xdf, 0x86, 0xa5, 0xdd, 0xc4, 0x65, 0xaa, 0x3c, 0xab, + 0x6f, 0x66, 0xca, 0x33, 0x52, 0xee, 0x61, 0xc5, 0x58, 0x93, 0x24, 0x3c, 0xa1, 0xc7, 0xfc, 0x71, + 0x01, 0xd0, 0xe4, 0x2d, 0x01, 0xba, 0xa6, 0xb9, 0xb2, 0xcb, 0x09, 0x57, 0xb6, 0xa4, 0x4a, 0x28, + 0x9e, 0xec, 0x21, 0xcc, 0x89, 0xfe, 0xce, 0x50, 0xbd, 0x90, 0x1d, 0x91, 0x60, 0x69, 0x46, 0x21, + 0x31, 0x59, 0x00, 0x2f, 0xed, 0x51, 0xda, 0xdd, 0x09, 0xe0, 0xd3, 0xac, 0x3c, 0x44, 0x45, 0x7b, + 0xf2, 0x20, 0x10, 0xb6, 0x20, 0x2d, 0xed, 0xfa, 0x89, 0x4a, 0xe8, 0xa2, 0xa8, 0xa0, 0xb4, 0x63, + 0x15, 0x5a, 0x4e, 0x54, 0xdf, 0xda, 0x95, 0xa6, 0xf5, 0x04, 0x13, 0xa5, 0x98, 0x95, 0xc4, 0x44, + 0x04, 0x2a, 0xd1, 0x3a, 0x4b, 0x43, 0x3a, 0x81, 0x82, 0x74, 0x0b, 0x8a, 0x91, 0xcd, 0x7f, 0x37, + 0x64, 0x90, 0xfe, 0xc0, 0xeb, 0x8f, 0x06, 0x04, 0x5d, 0x86, 0x82, 0x6b, 0x0d, 0x42, 0x9b, 0x89, + 0x6e, 0xff, 0xf8, 0xa3, 0x06, 0x4e, 0xe1, 0xb7, 0x7f, 0xfc, 0x4c, 0x98, 0x25, 0x8d, 0x8e, 0x35, + 0x24, 0x93, 0x4e, 0x59, 0xf8, 0x92, 0x98, 0xe8, 0x7d, 0x98, 0x1b, 0x78, 0x23, 0x37, 0x08, 0xcb, + 0x92, 0xaf, 0xcd, 0x86, 0x7e, 0x97, 0xc9, 0xc6, 0xe0, 0xfc, 0x27, 0xc5, 0x12, 0xd2, 0x7c, 0x07, + 0x96, 0x92, 0xbc, 0x68, 0x0d, 0x16, 0x3b, 0x84, 0x06, 0x8e, 0xcb, 0xe3, 0xd7, 0x6d, 0x2b, 0xd8, + 0x93, 0x63, 0x3f, 0x2f, 0x41, 0x16, 0x37, 0x74, 0x32, 0x4e, 0xf2, 0x9b, 0x7f, 0x99, 0x93, 0xc7, + 0x80, 0x3a, 0x42, 0xf4, 0xba, 0xb6, 0xfb, 0x9e, 0x4f, 0xec, 0xbe, 0x73, 0x13, 0x02, 0xca, 0x16, + 0xbc, 0x03, 0x73, 0x54, 0x2d, 0xfb, 0xbe, 0x90, 0x16, 0xe0, 0x8a, 0xd4, 0x55, 0x9b, 0x54, 0x1e, + 0xe3, 0xca, 0xbc, 0x59, 0x22, 0xa0, 0x07, 0xfc, 0xce, 0x43, 0x64, 0x9c, 0x72, 0xcb, 0xbd, 0x94, + 0x06, 0x17, 0xa5, 0xa8, 0x1a, 
0xe2, 0x69, 0x79, 0x35, 0x22, 0x48, 0x38, 0x86, 0x42, 0x6f, 0x41, + 0xde, 0xa6, 0xce, 0x71, 0x15, 0xc2, 0xf5, 0xf6, 0x96, 0x86, 0xc5, 0xab, 0x16, 0xeb, 0xed, 0x2d, + 0xcc, 0x04, 0xcd, 0xdf, 0x2b, 0x81, 0x92, 0xa5, 0xa2, 0xb7, 0x60, 0x81, 0x12, 0xff, 0xc0, 0xb1, + 0xc9, 0x9a, 0x6d, 0xb3, 0x85, 0x91, 0xf3, 0x16, 0x3d, 0x13, 0x68, 0x6b, 0x54, 0x9c, 0xe0, 0xe6, + 0x6f, 0x30, 0x54, 0xab, 0xcc, 0xfe, 0x06, 0xe3, 0x71, 0xf6, 0x18, 0x57, 0x73, 0xf3, 0x4f, 0xbb, + 0x9a, 0xfb, 0x2d, 0x28, 0x53, 0x3d, 0x8c, 0xfa, 0x5a, 0xf6, 0x08, 0x59, 0x46, 0x2e, 0xd1, 0x45, + 0x53, 0x14, 0xae, 0x44, 0x98, 0x6c, 0x52, 0x64, 0x7e, 0x53, 0x9c, 0x6d, 0x52, 0x1e, 0x93, 0xd9, + 0x7c, 0x03, 0x2a, 0x3e, 0x11, 0x13, 0x44, 0xa5, 0x6f, 0x4a, 0x2d, 0xf1, 0x60, 0xc9, 0x84, 0xc9, + 0x87, 0x23, 0xc7, 0x27, 0x03, 0xe2, 0x06, 0x34, 0x4e, 0xe0, 0x43, 0x2a, 0xc5, 0x31, 0x1a, 0xfa, + 0x00, 0x60, 0x18, 0xdd, 0x17, 0xc8, 0xf2, 0x51, 0xe6, 0xb4, 0x41, 0xbf, 0x69, 0x88, 0xf3, 0x95, + 0xb8, 0x1d, 0x2b, 0xe8, 0xe8, 0x7d, 0xb8, 0x10, 0x67, 0xc0, 0x1b, 0xc4, 0xea, 0xf0, 0xe0, 0x4e, + 0x5e, 0xca, 0x89, 0x6b, 0xaa, 0xaf, 0x1e, 0x8d, 0xeb, 0x17, 0xd6, 0xa7, 0x31, 0xe1, 0xe9, 0xf2, + 0x68, 0x00, 0xf3, 0xae, 0xd7, 0x21, 0x6d, 0xd2, 0x27, 0x76, 0xe0, 0xf9, 0x32, 0x55, 0xcd, 0x52, + 0x4a, 0x12, 0x45, 0x4f, 0xab, 0x7f, 0x4f, 0x11, 0x17, 0x85, 0x31, 0xb5, 0x05, 0x6b, 0xf0, 0xe8, + 0x0d, 0x58, 0xe0, 0x4e, 0x6e, 0xc7, 0x1f, 0xd1, 0x80, 0x74, 0xd6, 0xd7, 0x78, 0x4a, 0x5b, 0x16, + 0x67, 0xe5, 0x5d, 0x8d, 0x82, 0x13, 0x9c, 0xe6, 0x1f, 0x1a, 0x90, 0xf2, 0x3c, 0x4b, 0x33, 0x7d, + 0xe3, 0x69, 0x9b, 0xfe, 0x0b, 0x9a, 0x8b, 0x53, 0x2f, 0x70, 0x34, 0xf7, 0x65, 0xfe, 0x85, 0x01, + 0x67, 0xd3, 0x6a, 0x6b, 0xcc, 0x06, 0x63, 0xbf, 0x66, 0xcc, 0x58, 0x66, 0x54, 0x6f, 0x7d, 0xd3, + 0x5c, 0xdb, 0x82, 0xe2, 0xe2, 0x37, 0x1c, 0x5f, 0xf6, 0x31, 0xf2, 0x45, 0x1b, 0x1a, 0x15, 0x27, + 0xb8, 0xcd, 0xef, 0x17, 0x60, 0x39, 0x25, 0xd7, 0x41, 0x9b, 0xf2, 0x56, 0x65, 0x86, 0x0b, 0xc1, + 0xe8, 0x00, 0xd6, 0x6e, 0x56, 0x60, 0x38, 0xea, 0xf7, 0x9f, 0xec, 0x62, 0x30, 0x94, 0xc7, 0x0a, + 0x56, 0x78, 0x4d, 0x92, 0x3f, 0xc1, 0x35, 0xc9, 0x1d, 0x40, 0xe4, 0xe3, 0xa1, 0x47, 0x89, 0xcc, + 0x59, 0x3d, 0x1e, 0xb7, 0x14, 0xb8, 0x0d, 0x46, 0x4f, 0xaf, 0x36, 0x27, 0x38, 0x70, 0x8a, 0x14, + 0x5a, 0x85, 0x4a, 0xd7, 0xf3, 0x6d, 0xc2, 0x7a, 0xc9, 0x3d, 0x97, 0x52, 0xf5, 0xbb, 0x15, 0x12, + 0x70, 0xcc, 0x83, 0xde, 0x8b, 0xab, 0xc2, 0x73, 0x99, 0x2f, 0x33, 0xc5, 0x98, 0xb9, 0xa3, 0x98, + 0x5e, 0x0e, 0x5e, 0x83, 0x45, 0x2e, 0xb0, 0xb6, 0xbd, 0x15, 0xde, 0x37, 0x95, 0xf4, 0xe8, 0xa0, + 0xa9, 0x93, 0x71, 0x92, 0xdf, 0xfc, 0x51, 0x11, 0x96, 0x53, 0x32, 0xfc, 0xe8, 0x8e, 0xcd, 0x78, + 0x92, 0x3b, 0xb6, 0x2f, 0xca, 0x12, 0x5e, 0x82, 0x92, 0xeb, 0xad, 0x5b, 0xf6, 0x1e, 0x91, 0xef, + 0x19, 0xa2, 0x29, 0xba, 0x27, 0x9a, 0x71, 0x48, 0x0f, 0x8d, 0xa6, 0x70, 0x02, 0xa3, 0x99, 0x79, + 0xa1, 0xdf, 0x0a, 0xab, 0x2c, 0x5d, 0xa7, 0x4f, 0x78, 0xac, 0x36, 0x97, 0xd8, 0x99, 0x1a, 0x15, + 0x27, 0xb8, 0xd1, 0xd7, 0xa1, 0x22, 0x96, 0xc7, 0xef, 0xd1, 0x0c, 0xb7, 0x81, 0x51, 0x67, 0x9a, + 0xa1, 0x10, 0x8e, 0xe5, 0xd1, 0x10, 0xce, 0xf3, 0x74, 0x80, 0xf9, 0xeb, 0x81, 0xf3, 0x6d, 0x11, + 0x0f, 0x8a, 0x67, 0x57, 0xa2, 0xce, 0x79, 0xe3, 0x68, 0x5c, 0x3f, 0xbf, 0x95, 0xce, 0xf2, 0x68, + 0x3a, 0x09, 0x4f, 0x83, 0x45, 0xdf, 0x80, 0xd2, 0x01, 0x8f, 0xa8, 0xc2, 0x9b, 0x89, 0xc6, 0x6c, + 0xd1, 0x71, 0xbc, 0x8a, 0xe2, 0x37, 0xc5, 0x21, 0x9e, 0xf9, 0x7d, 0x03, 0xd2, 0xaf, 0x07, 0xf5, + 0x39, 0x33, 0x9e, 0x70, 0xce, 0x9e, 0x8f, 0xed, 0x4a, 0x94, 0xf3, 0xab, 0x69, 0x36, 0x65, 0xfe, + 0x91, 0x01, 0xcb, 0x29, 0xf5, 0x8d, 0x5f, 0x8e, 0x23, 
0xe9, 0xb3, 0x5c, 0xb2, 0x73, 0x9b, 0x07, + 0xc4, 0x0d, 0x4e, 0x76, 0x29, 0xb9, 0x29, 0xae, 0x02, 0x73, 0xb2, 0xaa, 0x9f, 0xa9, 0x38, 0xc1, + 0xeb, 0xc3, 0xfa, 0x1d, 0xe0, 0x13, 0x78, 0xee, 0xe9, 0x77, 0xce, 0x85, 0x2f, 0xfb, 0xce, 0xd9, + 0xfc, 0x2b, 0x03, 0x16, 0xf4, 0xbb, 0x4e, 0xf4, 0x55, 0xc8, 0x8f, 0x7c, 0x47, 0x4e, 0x6a, 0xd4, + 0xfb, 0x77, 0xf0, 0x16, 0x66, 0xed, 0x8c, 0xec, 0x93, 0xae, 0x5c, 0xb1, 0x88, 0x8c, 0x49, 0x17, + 0xb3, 0x76, 0x44, 0xa0, 0x3a, 0xf4, 0xbd, 0x8f, 0x0f, 0xc5, 0x39, 0x3f, 0xc3, 0xfb, 0xec, 0xed, + 0x58, 0x2a, 0x2e, 0x23, 0x2b, 0x8d, 0x58, 0xc5, 0xe5, 0x11, 0xd4, 0x64, 0x71, 0xec, 0x97, 0xc3, + 0x5c, 0xff, 0x2e, 0x07, 0x25, 0x69, 0x34, 0xe8, 0x43, 0x58, 0xe8, 0x69, 0xd3, 0x3b, 0x43, 0xb7, + 0x12, 0x77, 0xd0, 0x91, 0xcb, 0xd5, 0xdb, 0x71, 0x42, 0x01, 0xfa, 0x6d, 0x38, 0xd3, 0x73, 0x02, + 0x7d, 0x4c, 0x33, 0x54, 0x0e, 0x6e, 0x27, 0x65, 0x9b, 0x17, 0xa4, 0xe2, 0x33, 0x13, 0x24, 0x3c, + 0xa9, 0x09, 0xdd, 0x87, 0x82, 0x4f, 0xba, 0xb3, 0x3c, 0x72, 0x62, 0x7b, 0x8a, 0x74, 0xf9, 0x1e, + 0x8b, 0xa2, 0x2f, 0x4c, 0xba, 0x14, 0x73, 0x20, 0xf3, 0x77, 0xc5, 0x52, 0x27, 0x0a, 0x84, 0xff, + 0x13, 0x9f, 0x4c, 0xfc, 0x97, 0x01, 0x10, 0x77, 0xf6, 0xff, 0xdf, 0xda, 0x9a, 0x7f, 0x9e, 0x83, + 0x49, 0x46, 0xb6, 0x2f, 0x6c, 0x91, 0x3d, 0x1a, 0xa9, 0x9f, 0x29, 0x49, 0x2a, 0x7a, 0x08, 0x73, + 0x16, 0xff, 0xce, 0x67, 0x86, 0x1e, 0x0b, 0x55, 0xeb, 0x9e, 0x1b, 0xf8, 0x5e, 0xff, 0x1d, 0x4a, + 0x7c, 0xe5, 0xe3, 0x1a, 0x8e, 0x85, 0x25, 0x26, 0x22, 0x2c, 0x3d, 0x91, 0xdf, 0xea, 0xcc, 0xf0, + 0x4c, 0x7e, 0x52, 0x81, 0x92, 0xaa, 0x48, 0x38, 0x1c, 0x23, 0xcf, 0x70, 0x6f, 0x6d, 0x7e, 0xcf, + 0x80, 0xa5, 0x64, 0x35, 0x9d, 0xc9, 0xf3, 0x60, 0x63, 0x6b, 0x23, 0x79, 0x57, 0xb1, 0x25, 0x9a, + 0x71, 0x48, 0x47, 0x77, 0xa0, 0xc4, 0x82, 0x4e, 0x2c, 0xbd, 0x6d, 0xc6, 0x90, 0x95, 0x9f, 0xef, + 0xb7, 0x84, 0x1c, 0x0e, 0x01, 0xcc, 0x7f, 0x30, 0x00, 0x4d, 0xd6, 0x5b, 0xd1, 0x36, 0x9c, 0x15, + 0x5f, 0x62, 0xc8, 0x47, 0x04, 0x5b, 0x5a, 0xd7, 0x2e, 0xc9, 0xae, 0x9d, 0x6d, 0xa5, 0xf0, 0xe0, + 0x54, 0xc9, 0x28, 0xc8, 0xce, 0x9d, 0x3c, 0xc8, 0x7e, 0x01, 0xe6, 0x86, 0x6c, 0xae, 0x3a, 0x32, + 0x12, 0x8e, 0x56, 0x7c, 0x9b, 0xb7, 0x62, 0x49, 0x35, 0xff, 0x3a, 0x07, 0xb5, 0x69, 0xcf, 0xb0, + 0xbf, 0x80, 0x91, 0x3d, 0xd4, 0x46, 0xf6, 0x46, 0xe6, 0x37, 0x3f, 0x81, 0x4f, 0xac, 0xc1, 0x8e, + 0xd5, 0x3b, 0x3e, 0xc7, 0x1c, 0xc0, 0xa2, 0xa2, 0xf5, 0x84, 0x9f, 0xdc, 0x44, 0x39, 0x52, 0x4b, + 0x87, 0xc2, 0x49, 0x6c, 0xb3, 0x0d, 0x10, 0xbf, 0x23, 0xcd, 0x50, 0x83, 0x7e, 0x0e, 0x8a, 0x07, + 0x56, 0x7f, 0x14, 0x7e, 0xb9, 0x18, 0xbd, 0x06, 0x7f, 0xc0, 0x1a, 0xb1, 0xa0, 0x99, 0x7f, 0x9c, + 0x83, 0xaa, 0xf2, 0xce, 0xe9, 0x69, 0xa5, 0xdf, 0xcf, 0x40, 0xce, 0xa2, 0x3c, 0xdd, 0xa9, 0x88, + 0x8b, 0xe9, 0x35, 0x8a, 0x73, 0x16, 0x45, 0xef, 0x42, 0x71, 0x68, 0x05, 0x7b, 0xe1, 0x5b, 0xf6, + 0xab, 0xb3, 0xbd, 0xc2, 0x62, 0xe9, 0x49, 0x3c, 0x0e, 0xf6, 0x8b, 0x62, 0x81, 0x97, 0xc8, 0xf2, + 0xf2, 0x4f, 0x2f, 0xcb, 0x33, 0xbf, 0x6b, 0xc0, 0x62, 0xa2, 0x0f, 0xe8, 0x2a, 0x00, 0x8d, 0x7e, + 0xc9, 0x25, 0x88, 0x0a, 0x69, 0x31, 0x1f, 0x56, 0xb8, 0x9e, 0xb8, 0x60, 0xd2, 0x87, 0xf3, 0x53, + 0x8c, 0x93, 0xa5, 0x88, 0x6c, 0xc5, 0xe9, 0xd0, 0xb2, 0x49, 0xf2, 0xc9, 0xfe, 0xbd, 0x90, 0x80, + 0x63, 0x9e, 0xc8, 0x78, 0x72, 0xd3, 0x8c, 0xc7, 0xfc, 0x47, 0x03, 0x2e, 0x1d, 0x77, 0x19, 0xcc, + 0x92, 0x7e, 0x79, 0xe3, 0x1b, 0xa5, 0x99, 0x89, 0x2b, 0x81, 0x3b, 0x3a, 0x19, 0x27, 0xf9, 0xd1, + 0x75, 0xa8, 0x2a, 0x4d, 0xb2, 0x33, 0x51, 0x1c, 0xa9, 0x88, 0x63, 0x95, 0xef, 0x09, 0xc2, 0x78, + 0xf3, 0x6f, 0x0d, 0x38, 0x9b, 0x56, 0x39, 0x44, 0xbd, 0xf0, 0x1b, 0x0b, 0x91, 
0xbb, 0x35, 0x4f, + 0x58, 0x81, 0x6c, 0xf0, 0x2f, 0x2d, 0x36, 0xdd, 0xc0, 0x3f, 0x4c, 0xff, 0xfa, 0xe2, 0xe2, 0x4d, + 0x80, 0x98, 0x07, 0x2d, 0x41, 0x7e, 0x9f, 0x1c, 0x8a, 0x89, 0xc3, 0xec, 0x4f, 0x74, 0x56, 0xdb, + 0xb4, 0x72, 0x97, 0xbe, 0x91, 0xbb, 0x69, 0xbc, 0x51, 0xfe, 0x83, 0x3f, 0xa9, 0x9f, 0xfa, 0xce, + 0x2f, 0x2e, 0x9f, 0x32, 0x7f, 0x60, 0x80, 0x1a, 0x65, 0xa3, 0x97, 0xa1, 0xb2, 0x17, 0x04, 0x43, + 0xde, 0x24, 0x9f, 0x74, 0xf1, 0x2b, 0x89, 0xb7, 0x77, 0x76, 0xb6, 0x79, 0x23, 0x8e, 0xe9, 0xa8, + 0x01, 0xc0, 0x7e, 0x50, 0xc1, 0x5d, 0x88, 0x9f, 0x61, 0x32, 0xee, 0xb6, 0x60, 0x57, 0x38, 0x44, + 0x32, 0x2a, 0x98, 0xc5, 0xa7, 0x7b, 0x32, 0x19, 0x15, 0x9c, 0x21, 0xcd, 0xfc, 0x33, 0x03, 0xce, + 0x4c, 0x3c, 0x21, 0x44, 0xdb, 0x51, 0xf8, 0x3d, 0x6b, 0xf1, 0x71, 0x4a, 0xa0, 0xfe, 0xc4, 0xbb, + 0xe8, 0x26, 0x9c, 0x15, 0x88, 0x5c, 0x6b, 0xbc, 0x85, 0x1e, 0xeb, 0x4e, 0xcd, 0x3f, 0x35, 0x00, + 0xe2, 0x72, 0x18, 0xda, 0x85, 0x79, 0xd1, 0x25, 0x2d, 0x8e, 0xcc, 0x3e, 0xc0, 0xb3, 0x52, 0xc5, + 0x7c, 0x5b, 0x41, 0xc1, 0x1a, 0x26, 0xdb, 0xd7, 0xbc, 0x0a, 0xcd, 0x77, 0x57, 0x4e, 0xdf, 0xd7, + 0x77, 0x43, 0x02, 0x8e, 0x79, 0xcc, 0x9f, 0xe7, 0x61, 0x39, 0xe5, 0xd1, 0xca, 0xff, 0xe9, 0xa2, + 0xea, 0x4b, 0x50, 0x12, 0xdf, 0x31, 0xd0, 0x64, 0x74, 0x27, 0x3e, 0x73, 0xa0, 0x38, 0xa4, 0xa3, + 0x2b, 0x50, 0x75, 0x5c, 0x5b, 0xdc, 0xb1, 0x58, 0x61, 0x31, 0x4d, 0xdc, 0x5f, 0xc7, 0xcd, 0x58, + 0xe5, 0xd1, 0xab, 0x6f, 0x73, 0x19, 0xaa, 0x6f, 0x5f, 0x60, 0xf9, 0xe9, 0x9b, 0x70, 0x66, 0x22, + 0xf4, 0xcd, 0x16, 0x07, 0x10, 0xfe, 0xf9, 0x7c, 0x22, 0x0e, 0x10, 0x5f, 0xcd, 0x0b, 0x9a, 0xf9, + 0x43, 0x03, 0x16, 0x12, 0x39, 0xc2, 0x89, 0x4a, 0x35, 0xf7, 0xd5, 0x52, 0xcd, 0xc9, 0xf2, 0x1b, + 0xad, 0x68, 0x63, 0xde, 0x81, 0xf4, 0x57, 0xf0, 0xc9, 0xc5, 0x34, 0x1e, 0xbf, 0x98, 0xe6, 0x4f, + 0x72, 0x50, 0x89, 0x1e, 0x0f, 0xa2, 0x57, 0xb5, 0x99, 0xbb, 0xa0, 0xce, 0xdc, 0xa3, 0x71, 0x5d, + 0x30, 0x2a, 0xd3, 0xf8, 0x3e, 0x54, 0xa2, 0xc7, 0xa7, 0x51, 0x29, 0x2a, 0x7b, 0x9c, 0x17, 0x59, + 0x4d, 0xf4, 0xa2, 0x15, 0xc7, 0x78, 0x2c, 0xf4, 0x0d, 0x5f, 0x87, 0xde, 0x75, 0xfa, 0x7d, 0x87, + 0xca, 0x0b, 0xb6, 0x3c, 0xbf, 0x60, 0x8b, 0x42, 0xdf, 0x8d, 0x14, 0x1e, 0x9c, 0x2a, 0x89, 0xb6, + 0xa1, 0x48, 0x03, 0x32, 0xa4, 0xb2, 0xe6, 0xfc, 0x72, 0xa6, 0x77, 0x95, 0x64, 0xc8, 0x53, 0xfa, + 0xc8, 0x44, 0x58, 0x0b, 0xc5, 0x02, 0xc8, 0xfc, 0x37, 0x03, 0xca, 0x21, 0x0b, 0x7a, 0x45, 0x9b, + 0xbc, 0x5a, 0x62, 0xf2, 0x38, 0xdf, 0xff, 0xda, 0xb9, 0x33, 0xc7, 0x06, 0x2c, 0xe8, 0x6f, 0x44, + 0x94, 0x42, 0x92, 0x71, 0x5c, 0x21, 0x09, 0xbd, 0x02, 0x65, 0xab, 0xdf, 0xf7, 0x3e, 0xda, 0x74, + 0x0f, 0x64, 0xf1, 0x36, 0xba, 0x7b, 0x5e, 0x93, 0xed, 0x38, 0xe2, 0x40, 0x07, 0xb0, 0x28, 0xe4, + 0xe2, 0xd7, 0xbf, 0xf9, 0xcc, 0x57, 0xa0, 0x69, 0xe7, 0x58, 0x73, 0x99, 0x45, 0x5e, 0x6d, 0x1d, + 0x13, 0x27, 0x95, 0x34, 0x6f, 0x7f, 0xfa, 0xf9, 0xca, 0xa9, 0x9f, 0x7d, 0xbe, 0x72, 0xea, 0xb3, + 0xcf, 0x57, 0x4e, 0x7d, 0xe7, 0x68, 0xc5, 0xf8, 0xf4, 0x68, 0xc5, 0xf8, 0xd9, 0xd1, 0x8a, 0xf1, + 0xd9, 0xd1, 0x8a, 0xf1, 0x2f, 0x47, 0x2b, 0xc6, 0xef, 0xff, 0xeb, 0xca, 0xa9, 0x6f, 0x3e, 0xfb, + 0xd8, 0x7f, 0x49, 0xf3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x88, 0x7d, 0x3c, 0xce, 0xb6, 0x46, + 0x00, 0x00, +} + +func (m *BinaryBuildRequestOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinaryBuildRequestOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
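+// The Marshal/MarshalTo/MarshalToSizedBuffer triad above follows the usual
+// gogo/protobuf code-generator shape: Marshal allocates exactly Size() bytes,
+// and MarshalToSizedBuffer fills the buffer from the end backwards so each
+// nested message's length is known before its header is written. Each field's
+// payload is written first, then (for wire type 2) its varint length, then
+// its key byte, computed as (field_number << 3) | wire_type; for example,
+// 0x42 below is (8<<3)|2, i.e. field 8, length-delimited.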
+func (m *BinaryBuildRequestOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CommitterEmail) + copy(dAtA[i:], m.CommitterEmail) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterEmail))) + i-- + dAtA[i] = 0x42 + i -= len(m.CommitterName) + copy(dAtA[i:], m.CommitterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterName))) + i-- + dAtA[i] = 0x3a + i -= len(m.AuthorEmail) + copy(dAtA[i:], m.AuthorEmail) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorEmail))) + i-- + dAtA[i] = 0x32 + i -= len(m.AuthorName) + copy(dAtA[i:], m.AuthorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorName))) + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Commit) + copy(dAtA[i:], m.Commit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit))) + i-- + dAtA[i] = 0x1a + i -= len(m.AsFile) + copy(dAtA[i:], m.AsFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BinaryBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinaryBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BinaryBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.AsFile) + copy(dAtA[i:], m.AsFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BitbucketWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BitbucketWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BitbucketWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Build) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Build) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Build) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + 
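+ // ObjectMeta (field 1, key 0xa) is encoded last because the buffer is
+ // filled back-to-front; it ends up first on the wire.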
{ + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildConfigList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildConfigSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailedBuildsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedBuildsHistoryLimit)) + i-- + dAtA[i] = 0x28 + } + if m.SuccessfulBuildsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulBuildsHistoryLimit)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.RunPolicy) + copy(dAtA[i:], m.RunPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RunPolicy))) + i-- + dAtA[i] = 0x12 + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Triggers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BuildConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ImageChangeTriggers) > 0 { + for iNdEx := len(m.ImageChangeTriggers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageChangeTriggers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.LastVersion)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *BuildList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildLog) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *BuildLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.InsecureSkipTLSVerifyBackend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.Version != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + i-- + dAtA[i] = 0x50 + } + i-- + if m.NoWait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 + } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 + } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 + } + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildOutput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildOutput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ImageLabels) > 0 { + for iNdEx := len(m.ImageLabels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PushSecret != nil { + { + size, err := m.PushSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.To != nil { + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildPostCommitSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildPostCommitSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildPostCommitSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Script) + copy(dAtA[i:], m.Script) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script))) + i-- + dAtA[i] = 0x1a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BuildRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SourceStrategyOptions != nil { + { + size, err := m.SourceStrategyOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.DockerStrategyOptions != nil { + { + size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.TriggeredBy) > 0 { + for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.LastVersion != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LastVersion)) + i-- + dAtA[i] = 0x30 + } + if m.Binary != nil { + { + size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TriggeredByImage != nil { + { + size, err := m.TriggeredByImage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConfigMaps) > 0 { + for iNdEx := len(m.ConfigMaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConfigMaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.SourceSecret != nil { + { + size, err := m.SourceSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.ContextDir) + copy(dAtA[i:], m.ContextDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContextDir))) + i-- + dAtA[i] = 0x32 + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Dockerfile != nil { + i -= len(*m.Dockerfile) + copy(dAtA[i:], *m.Dockerfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Dockerfile))) + i-- + dAtA[i] = 0x1a + } + if m.Binary != nil { + { + size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TriggeredBy) > 0 { + for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + i -= len(m.LogSnippet) + copy(dAtA[i:], m.LogSnippet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LogSnippet))) + i-- + dAtA[i] = 0x62 + if len(m.Stages) > 0 { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.OutputDockerImageReference) + copy(dAtA[i:], m.OutputDockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutputDockerImageReference))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x38 + if m.CompletionTimestamp != nil { + { + size, err := m.CompletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.StartTimestamp != nil { + { + size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i-- + if m.Cancelled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStatusOutput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatusOutput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatusOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.To != nil { + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + 
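+ // This block runs only when To is non-nil: absent optional message
+ // fields are simply omitted from the encoded output.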
i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildStatusOutputTo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatusOutputTo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatusOutputTo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ImageDigest) + copy(dAtA[i:], m.ImageDigest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageDigest))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.JenkinsPipelineStrategy != nil { + { + size, err := m.JenkinsPipelineStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.CustomStrategy != nil { + { + size, err := m.CustomStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SourceStrategy != nil { + { + size, err := m.SourceStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DockerStrategy != nil { + { + size, err := m.DockerStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChangeBuild != nil { + { + size, err := m.ImageChangeBuild.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + 
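+ // Keys decrease as encoding walks backwards through BuildTriggerCause:
+ // BitbucketWebHook is field 6 (0x32), GitLabWebHook 5 (0x2a),
+ // ImageChangeBuild 4 (0x22), GitHubWebHook 3 (0x1a), GenericWebHook 2
+ // (0x12), and Message 1 (0xa).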
if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChange != nil { + { + size, err := m.ImageChange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolumeMount) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationPath) + copy(dAtA[i:], m.DestinationPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MountTrustedCA != nil { + i-- + if *m.MountTrustedCA { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CompletionDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CompletionDeadlineSeconds)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.PostCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
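+ // Embedded messages such as Strategy are framed as payload (already
+ // written above), then the payload's varint length, then the key byte
+ // (here 0x22: field 4, wire type 2).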
+ i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceAccount) + copy(dAtA[i:], m.ServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigMapBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.BuildAPIVersion) + copy(dAtA[i:], m.BuildAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildAPIVersion))) + i-- + dAtA[i] = 0x3a + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.ExposeDockerSocket { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 
0x20 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DockerBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ImageOptimizationPolicy != nil { + i -= len(*m.ImageOptimizationPolicy) + copy(dAtA[i:], *m.ImageOptimizationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageOptimizationPolicy))) + i-- + dAtA[i] = 0x42 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DockerfilePath) + copy(dAtA[i:], m.DockerfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerfilePath))) + i-- + dAtA[i] = 0x32 + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DockerStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.NoCache != nil { + i-- + if *m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GenericWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenericWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GenericWebHookEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericWebHookEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenericWebHookEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DockerStrategyOptions != nil { + { + size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProxyConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x12 + i -= len(m.URI) + 
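// String fields are marshaled back to front: reserve len(payload) bytes, copy the payload in, then prepend its varint-encoded length and finally the field tag. +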
copy(dAtA[i:], m.URI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URI))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitHubWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitHubWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitHubWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Refs) > 0 { + for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitLabWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitLabWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitLabWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitRefInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitRefInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRefInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + 
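// A tag byte encodes (field_number << 3) | wire_type: the 0x12 above is field 2 with wire type 2 (length-delimited), and the 0xa written below is field 1. +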
{ + size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitSourceRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitSourceRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitSourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Committer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Author.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Commit) + copy(dAtA[i:], m.Commit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FromRef != nil { + { + size, err := m.FromRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LastTriggeredImageID) + copy(dAtA[i:], m.LastTriggeredImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeTriggerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeTriggerStatus) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeTriggerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTriggerTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.LastTriggeredImageID) + copy(dAtA[i:], m.LastTriggeredImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLabel) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLabel) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.As) > 0 { + for iNdEx := len(m.As) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.As[iNdEx]) + copy(dAtA[i:], m.As[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.As[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSourcePath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSourcePath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSourcePath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + i -= len(m.SourcePath) + copy(dAtA[i:], m.SourcePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourcePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamTagReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamTagReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamTagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JenkinsPipelineBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JenkinsPipelineBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JenkinsPipelineBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Jenkinsfile) + copy(dAtA[i:], m.Jenkinsfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Jenkinsfile))) + i-- + dAtA[i] = 0x12 + i -= len(m.JenkinsfilePath) + copy(dAtA[i:], m.JenkinsfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JenkinsfilePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m OptionalNodeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + keysForItems := make([]string, 0, len(m)) + for k := range m { + keysForItems = append(keysForItems, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForItems) + for iNdEx := len(keysForItems) - 1; iNdEx >= 0; iNdEx-- { + v := m[string(keysForItems[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForItems[iNdEx]) + copy(dAtA[i:], keysForItems[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForItems[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProxyConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
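// MarshalToSizedBuffer fills dAtA from the end toward the front; on success the returned count n equals size, so dAtA[:n] is the complete encoded message. +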
return dAtA[:n], nil +} + +func (m *ProxyConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NoProxy != nil { + i -= len(*m.NoProxy) + copy(dAtA[i:], *m.NoProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NoProxy))) + i-- + dAtA[i] = 0x2a + } + if m.HTTPSProxy != nil { + i -= len(*m.HTTPSProxy) + copy(dAtA[i:], *m.HTTPSProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPSProxy))) + i-- + dAtA[i] = 0x22 + } + if m.HTTPProxy != nil { + i -= len(*m.HTTPProxy) + copy(dAtA[i:], *m.HTTPProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPProxy))) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + +func (m *SecretBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretLocalReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretLocalReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretLocalReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x12 + { + size, err := m.SecretSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*SourceBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.Incremental != nil { + i-- + if *m.Incremental { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Scripts) + copy(dAtA[i:], m.Scripts) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scripts))) + i-- + dAtA[i] = 0x22 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceControlUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceControlUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceControlUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Email) + copy(dAtA[i:], m.Email) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Email))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
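+ // i begins at the end of the buffer because fields are emitted highest-numbered first, moving toward the front; the blank assignments below are generator boilerplate that silences unused-variable errors.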
+ _ = i + var l int + _ = l + if m.Incremental != nil { + i-- + if *m.Incremental { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StageInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StageInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StageInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Steps) > 0 { + for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds)) + i-- + dAtA[i] = 0x18 + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StepInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StepInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StepInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds)) + i-- + dAtA[i] = 0x18 + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebHookTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebHookTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebHookTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretReference != nil { + { + size, err := m.SecretReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.AllowEnv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BinaryBuildRequestOptions) Size() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorEmail) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterEmail) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BinaryBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BitbucketWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Build) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RunPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulBuildsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulBuildsHistoryLimit)) + } + if m.FailedBuildsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedBuildsHistoryLimit)) + } + return n +} + +func (m *BuildConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.LastVersion)) + if len(m.ImageChangeTriggers) > 0 { + for _, e := range m.ImageChangeTriggers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildLog) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m 
*BuildLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + if m.Version != nil { + n += 1 + sovGenerated(uint64(*m.Version)) + } + n += 2 + return n +} + +func (m *BuildOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PushSecret != nil { + l = m.PushSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImageLabels) > 0 { + for _, e := range m.ImageLabels { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildPostCommitSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Script) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TriggeredByImage != nil { + l = m.TriggeredByImage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastVersion != nil { + n += 1 + sovGenerated(uint64(*m.LastVersion)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategyOptions != nil { + l = m.SourceStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Dockerfile != nil { + l = len(*m.Dockerfile) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContextDir) + n += 1 + l + sovGenerated(uint64(l)) + if m.SourceSecret != nil { + l = m.SourceSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ConfigMaps) > 0 { + for _, e := range m.ConfigMaps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildSpec) Size() (n int) { + if m == nil { + return 0 + } + 
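// Size mirrors MarshalToSizedBuffer: a length-delimited field costs one tag byte plus a varint length plus the payload itself (the recurring 1 + l + sovGenerated(uint64(l)) pattern), and a bool costs a flat two bytes (n += 2). +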
var l int + _ = l + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTimestamp != nil { + l = m.CompletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Duration)) + l = len(m.OutputDockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, e := range m.Stages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.LogSnippet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatusOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildStatusOutputTo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageDigest) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.DockerStrategy != nil { + l = m.DockerStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategy != nil { + l = m.SourceStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CustomStrategy != nil { + l = m.CustomStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JenkinsPipelineStrategy != nil { + l = m.JenkinsPipelineStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChangeBuild != nil { + l = m.ImageChangeBuild.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChange != nil { + l = m.ImageChange.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildVolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DestinationPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CommonSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.PostCommit.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CompletionDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MountTrustedCA != nil { + n += 2 + } + return n +} + +func (m *CommonWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.BuildAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DockerBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.DockerfilePath) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ImageOptimizationPolicy != nil { + l = len(*m.ImageOptimizationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DockerStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.NoCache != nil { + n += 2 + } + return n +} + +func (m *GenericWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GenericWebHookEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GitBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ProxyConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitHubWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Refs) > 0 { + for _, e := range m.Refs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GitLabWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitRefInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitSourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Author.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Committer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageChangeCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.FromRef != nil { + l = m.FromRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageChangeTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.From != nil { + l = m.From.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *ImageChangeTriggerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTriggerTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageLabel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.As) > 0 { + for _, s := range m.As { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageSourcePath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JenkinsPipelineBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JenkinsfilePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Jenkinsfile) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNodeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for k, v := range m { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ProxyConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTPProxy != nil { + l = len(*m.HTTPProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPSProxy != nil { + l = len(*m.HTTPSProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NoProxy != nil { + l = len(*m.NoProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SecretBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretLocalReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SecretSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Scripts) + n += 1 + l + sovGenerated(uint64(l)) + if m.Incremental != nil { + n += 2 + } + n += 2 + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SourceControlUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Email) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SourceStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Incremental != nil { + n += 2 + } + return n +} + +func (m *StageInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + if len(m.Steps) > 0 { + for _, e := range m.Steps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StepInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + return n +} + +func (m *WebHookTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretReference != nil { + l = m.SecretReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BinaryBuildRequestOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildRequestOptions{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `AuthorName:` + fmt.Sprintf("%v", this.AuthorName) + `,`, + `AuthorEmail:` + fmt.Sprintf("%v", this.AuthorEmail) + `,`, + `CommitterName:` + fmt.Sprintf("%v", this.CommitterName) + `,`, + `CommitterEmail:` + fmt.Sprintf("%v", this.CommitterEmail) + `,`, + `}`, + }, "") + return s +} +func (this *BinaryBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildSource{`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Build) String() string { + if this == nil { + return "nil" + } + s 
:= strings.Join([]string{`&Build{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildSpec", "BuildSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildStatus", "BuildStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildConfigSpec", "BuildConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildConfigStatus", "BuildConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BuildConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BuildConfig", "BuildConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggers := "[]BuildTriggerPolicy{" + for _, f := range this.Triggers { + repeatedStringForTriggers += strings.Replace(strings.Replace(f.String(), "BuildTriggerPolicy", "BuildTriggerPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggers += "}" + s := strings.Join([]string{`&BuildConfigSpec{`, + `Triggers:` + repeatedStringForTriggers + `,`, + `RunPolicy:` + fmt.Sprintf("%v", this.RunPolicy) + `,`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulBuildsHistoryLimit:` + valueToStringGenerated(this.SuccessfulBuildsHistoryLimit) + `,`, + `FailedBuildsHistoryLimit:` + valueToStringGenerated(this.FailedBuildsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageChangeTriggers := "[]ImageChangeTriggerStatus{" + for _, f := range this.ImageChangeTriggers { + repeatedStringForImageChangeTriggers += strings.Replace(strings.Replace(f.String(), "ImageChangeTriggerStatus", "ImageChangeTriggerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImageChangeTriggers += "}" + s := 
strings.Join([]string{`&BuildConfigStatus{`, + `LastVersion:` + fmt.Sprintf("%v", this.LastVersion) + `,`, + `ImageChangeTriggers:` + repeatedStringForImageChangeTriggers + `,`, + `}`, + }, "") + return s +} +func (this *BuildList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Build{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Build", "Build", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLog{`, + `}`, + }, "") + return s +} +func (this *BuildLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `}`, + }, "") + return s +} +func (this *BuildOutput) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageLabels := "[]ImageLabel{" + for _, f := range this.ImageLabels { + repeatedStringForImageLabels += strings.Replace(strings.Replace(f.String(), "ImageLabel", "ImageLabel", 1), `&`, ``, 1) + "," + } + repeatedStringForImageLabels += "}" + s := strings.Join([]string{`&BuildOutput{`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImageLabels:` + repeatedStringForImageLabels + `,`, + `}`, + }, "") + return s +} +func (this *BuildPostCommitSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildPostCommitSpec{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, + `}`, + }, "") + return s +} +func (this *BuildRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + 
`Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `TriggeredByImage:` + strings.Replace(fmt.Sprintf("%v", this.TriggeredByImage), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `LastVersion:` + valueToStringGenerated(this.LastVersion) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `SourceStrategyOptions:` + strings.Replace(this.SourceStrategyOptions.String(), "SourceStrategyOptions", "SourceStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageSource{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageSource", "ImageSource", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForSecrets := "[]SecretBuildSource{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretBuildSource", "SecretBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + repeatedStringForConfigMaps := "[]ConfigMapBuildSource{" + for _, f := range this.ConfigMaps { + repeatedStringForConfigMaps += strings.Replace(strings.Replace(f.String(), "ConfigMapBuildSource", "ConfigMapBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForConfigMaps += "}" + s := strings.Join([]string{`&BuildSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `Dockerfile:` + valueToStringGenerated(this.Dockerfile) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitBuildSource", "GitBuildSource", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `ContextDir:` + fmt.Sprintf("%v", this.ContextDir) + `,`, + `SourceSecret:` + strings.Replace(fmt.Sprintf("%v", this.SourceSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ConfigMaps:` + repeatedStringForConfigMaps + `,`, + `}`, + }, "") + return s +} +func (this *BuildSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildSpec{`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForStages := "[]StageInfo{" + for _, f := range this.Stages { + repeatedStringForStages += strings.Replace(strings.Replace(f.String(), "StageInfo", "StageInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForStages += "}" + repeatedStringForConditions := "[]BuildCondition{" + for _, f := range this.Conditions { + 
repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "BuildCondition", "BuildCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&BuildStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Cancelled:` + fmt.Sprintf("%v", this.Cancelled) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StartTimestamp), "Time", "v1.Time", 1) + `,`, + `CompletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTimestamp), "Time", "v1.Time", 1) + `,`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `OutputDockerImageReference:` + fmt.Sprintf("%v", this.OutputDockerImageReference) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildStatusOutput", "BuildStatusOutput", 1), `&`, ``, 1) + `,`, + `Stages:` + repeatedStringForStages + `,`, + `LogSnippet:` + fmt.Sprintf("%v", this.LogSnippet) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutput) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutput{`, + `To:` + strings.Replace(this.To.String(), "BuildStatusOutputTo", "BuildStatusOutputTo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutputTo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutputTo{`, + `ImageDigest:` + fmt.Sprintf("%v", this.ImageDigest) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DockerStrategy:` + strings.Replace(this.DockerStrategy.String(), "DockerBuildStrategy", "DockerBuildStrategy", 1) + `,`, + `SourceStrategy:` + strings.Replace(this.SourceStrategy.String(), "SourceBuildStrategy", "SourceBuildStrategy", 1) + `,`, + `CustomStrategy:` + strings.Replace(this.CustomStrategy.String(), "CustomBuildStrategy", "CustomBuildStrategy", 1) + `,`, + `JenkinsPipelineStrategy:` + strings.Replace(this.JenkinsPipelineStrategy.String(), "JenkinsPipelineBuildStrategy", "JenkinsPipelineBuildStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerCause{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "GenericWebHookCause", "GenericWebHookCause", 1) + `,`, + `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "GitHubWebHookCause", "GitHubWebHookCause", 1) + `,`, + `ImageChangeBuild:` + strings.Replace(this.ImageChangeBuild.String(), "ImageChangeCause", "ImageChangeCause", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "GitLabWebHookCause", "GitLabWebHookCause", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "BitbucketWebHookCause", "BitbucketWebHookCause", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GitHubWebHook:` + 
strings.Replace(this.GitHubWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `ImageChange:` + strings.Replace(this.ImageChange.String(), "ImageChangeTrigger", "ImageChangeTrigger", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolume) String() string { + if this == nil { + return "nil" + } + repeatedStringForMounts := "[]BuildVolumeMount{" + for _, f := range this.Mounts { + repeatedStringForMounts += strings.Replace(strings.Replace(f.String(), "BuildVolumeMount", "BuildVolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForMounts += "}" + s := strings.Join([]string{`&BuildVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildVolumeSource", "BuildVolumeSource", 1), `&`, ``, 1) + `,`, + `Mounts:` + repeatedStringForMounts + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildVolumeMount{`, + `DestinationPath:` + fmt.Sprintf("%v", this.DestinationPath) + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildVolumeSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "v11.SecretVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "v11.ConfigMapVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "v11.CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommonSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonSpec{`, + `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildSource", "BuildSource", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "BuildStrategy", "BuildStrategy", 1), `&`, ``, 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildOutput", "BuildOutput", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v11.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), "BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`, + `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "OptionalNodeSelector", "OptionalNodeSelector", 1) + `,`, + `MountTrustedCA:` + valueToStringGenerated(this.MountTrustedCA) + `,`, + `}`, + }, "") + return s +} +func (this *CommonWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", 
"SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapBuildSource{`, + `ConfigMap:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *CustomBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForSecrets := "[]SecretSpec{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + s := strings.Join([]string{`&CustomBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ExposeDockerSocket:` + fmt.Sprintf("%v", this.ExposeDockerSocket) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `BuildAPIVersion:` + fmt.Sprintf("%v", this.BuildAPIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *DockerBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + repeatedStringForVolumes := "[]BuildVolume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&DockerBuildStrategy{`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `NoCache:` + fmt.Sprintf("%v", this.NoCache) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `DockerfilePath:` + fmt.Sprintf("%v", this.DockerfilePath) + `,`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `ImageOptimizationPolicy:` + valueToStringGenerated(this.ImageOptimizationPolicy) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `}`, + }, "") + return s +} +func (this *DockerStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + s := strings.Join([]string{`&DockerStrategyOptions{`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `NoCache:` + valueToStringGenerated(this.NoCache) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookCause) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookEvent) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GenericWebHookEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitInfo", "GitInfo", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitBuildSource{`, + `URI:` + fmt.Sprintf("%v", this.URI) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `ProxyConfig:` + strings.Replace(strings.Replace(this.ProxyConfig.String(), "ProxyConfig", "ProxyConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitHubWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitHubWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GitInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForRefs := "[]GitRefInfo{" + for _, f := range this.Refs { + repeatedStringForRefs += strings.Replace(strings.Replace(f.String(), "GitRefInfo", "GitRefInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForRefs += "}" + s := strings.Join([]string{`&GitInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `Refs:` + repeatedStringForRefs + `,`, + `}`, + }, "") + return s +} +func (this *GitLabWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitLabWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitRefInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRefInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitSourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitSourceRevision{`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Author:` + strings.Replace(strings.Replace(this.Author.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Committer:` + strings.Replace(strings.Replace(this.Committer.String(), 
"SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeCause{`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `FromRef:` + strings.Replace(fmt.Sprintf("%v", this.FromRef), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTrigger{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTriggerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTriggerStatus{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ImageStreamTagReference", "ImageStreamTagReference", 1), `&`, ``, 1) + `,`, + `LastTriggerTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTriggerTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLabel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLabel{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]ImageSourcePath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&ImageSource{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `As:` + fmt.Sprintf("%v", this.As) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSourcePath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageSourcePath{`, + `SourcePath:` + fmt.Sprintf("%v", this.SourcePath) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamTagReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *JenkinsPipelineBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&JenkinsPipelineBuildStrategy{`, + `JenkinsfilePath:` + fmt.Sprintf("%v", this.JenkinsfilePath) + `,`, + `Jenkinsfile:` + fmt.Sprintf("%v", this.Jenkinsfile) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `}`, + }, "") + 
return s +} +func (this *ProxyConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProxyConfig{`, + `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, + `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `NoProxy:` + valueToStringGenerated(this.NoProxy) + `,`, + `}`, + }, "") + return s +} +func (this *SecretBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretBuildSource{`, + `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *SecretLocalReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretLocalReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `SecretSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretSource), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `}`, + }, "") + return s +} +func (this *SourceBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumes := "[]BuildVolume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&SourceBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Scripts:` + fmt.Sprintf("%v", this.Scripts) + `,`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `}`, + }, "") + return s +} +func (this *SourceControlUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceControlUser{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `}`, + }, "") + return s +} +func (this *SourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceRevision{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitSourceRevision", "GitSourceRevision", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SourceStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceStrategyOptions{`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `}`, + }, "") + return s +} +func (this *StageInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForSteps := "[]StepInfo{" + for _, f := range this.Steps { + repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "StepInfo", "StepInfo", 1), `&`, ``, 1) + 
"," + } + repeatedStringForSteps += "}" + s := strings.Join([]string{`&StageInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`, + `Steps:` + repeatedStringForSteps + `,`, + `}`, + }, "") + return s +} +func (this *StepInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StepInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`, + `}`, + }, "") + return s +} +func (this *WebHookTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebHookTrigger{`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AllowEnv:` + fmt.Sprintf("%v", this.AllowEnv) + `,`, + `SecretReference:` + strings.Replace(this.SecretReference.String(), "SecretLocalReference", "SecretLocalReference", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BinaryBuildRequestOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorEmail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorEmail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterEmail", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterEmail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinaryBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Build) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Build: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BuildConfig{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Triggers = append(m.Triggers, BuildTriggerPolicy{}) + if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunPolicy = BuildRunPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulBuildsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulBuildsHistoryLimit = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedBuildsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedBuildsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType) + } + m.LastVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeTriggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageChangeTriggers = append(m.ImageChangeTriggers, ImageChangeTriggerStatus{}) + if err := m.ImageChangeTriggers[len(m.ImageChangeTriggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Build{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildLogOptions: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &v1.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v 
+ case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Version = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerifyBackend = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildOutput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildOutput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &v11.ObjectReference{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PushSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PushSecret == nil { + m.PushSecret = &v11.LocalObjectReference{} + } + if err := m.PushSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLabels = append(m.ImageLabels, ImageLabel{}) + if err := m.ImageLabels[len(m.ImageLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildPostCommitSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildPostCommitSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildPostCommitSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Script = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredByImage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TriggeredByImage == nil { + m.TriggeredByImage = &v11.ObjectReference{} + } + if err := m.TriggeredByImage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Binary == nil { + m.Binary = &BinaryBuildSource{} + } + if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LastVersion = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{}) + if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategyOptions == nil { + m.DockerStrategyOptions = &DockerStrategyOptions{} + } + if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategyOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceStrategyOptions == nil { + m.SourceStrategyOptions = &SourceStrategyOptions{} + } + if err := m.SourceStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Binary == nil { + m.Binary = &BinaryBuildSource{} + } + if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dockerfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Dockerfile = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitBuildSource{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageSource{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContextDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContextDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceSecret == nil { + m.SourceSecret = &v11.LocalObjectReference{} + } + if err := m.SourceSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, SecretBuildSource{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMaps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigMaps = append(m.ConfigMaps, ConfigMapBuildSource{}) + if err := m.ConfigMaps[len(m.ConfigMaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{}) + if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = BuildPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cancelled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Cancelled = bool(v != 0) + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTimestamp == nil { + m.StartTimestamp = &v1.Time{} + } + if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTimestamp == nil { + m.CompletionTimestamp = &v1.Time{} + } + if err := m.CompletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= time.Duration(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutputDockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutputDockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &v11.ObjectReference{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, StageInfo{}) + if err := m.Stages[len(m.Stages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSnippet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSnippet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, BuildCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatusOutput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatusOutput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatusOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &BuildStatusOutputTo{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatusOutputTo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatusOutputTo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatusOutputTo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageDigest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageDigest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategy == nil { + m.DockerStrategy = &DockerBuildStrategy{} + } + if err := m.DockerStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.SourceStrategy == nil { + m.SourceStrategy = &SourceBuildStrategy{} + } + if err := m.SourceStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CustomStrategy == nil { + m.CustomStrategy = &CustomBuildStrategy{} + } + if err := m.CustomStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsPipelineStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JenkinsPipelineStrategy == nil { + m.JenkinsPipelineStrategy = &JenkinsPipelineBuildStrategy{} + } + if err := m.JenkinsPipelineStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildTriggerCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildTriggerCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildTriggerCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &GenericWebHookCause{} + } + if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &GitHubWebHookCause{} + } + if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeBuild", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeBuild == nil { + m.ImageChangeBuild = &ImageChangeCause{} + } + if err := m.ImageChangeBuild.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitLabWebHook == nil { + m.GitLabWebHook = &GitLabWebHookCause{} + } + if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BitbucketWebHook == nil { + m.BitbucketWebHook = &BitbucketWebHookCause{} + } + if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildTriggerPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildTriggerPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &WebHookTrigger{} + } + if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &WebHookTrigger{} + } + if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChange == nil { + m.ImageChange = &ImageChangeTrigger{} + } + if err := m.ImageChange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitLabWebHook == nil { + m.GitLabWebHook = &WebHookTrigger{} + } + if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BitbucketWebHook == nil { + m.BitbucketWebHook = &WebHookTrigger{} + } + if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, BuildVolumeMount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildVolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildVolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildVolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationPath 
= string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildVolumeSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &v11.SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &v11.ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &v11.CSIVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommonSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommonSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommonSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := 
m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PostCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CompletionDeadlineSeconds = &v + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = OptionalNodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MountTrustedCA", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.MountTrustedCA = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommonWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommonWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommonWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExposeDockerSocket", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExposeDockerSocket = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, SecretSpec{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildAPIVersion", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildAPIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DockerBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCache = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildArgs = append(m.BuildArgs, v11.EnvVar{}) + if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageOptimizationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ImageOptimizationPolicy(dAtA[iNdEx:postIndex]) + m.ImageOptimizationPolicy = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, BuildVolume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DockerStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildArgs = append(m.BuildArgs, v11.EnvVar{}) + if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NoCache = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { 
+ return fmt.Errorf("proto: GenericWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericWebHookEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericWebHookEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericWebHookEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitInfo{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategyOptions == nil { + m.DockerStrategyOptions = &DockerStrategyOptions{} + } + if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProxyConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitHubWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitHubWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitHubWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Refs = append(m.Refs, GitRefInfo{}) + if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitLabWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitLabWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitLabWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRefInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRefInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRefInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
GitSourceRevision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitSourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitSourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitSourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Author", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Author.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Committer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if err := m.Committer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FromRef == nil { + m.FromRef = &v11.ObjectReference{} + } + if err := m.FromRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeTriggerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeTriggerStatus: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeTriggerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggerTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTriggerTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLabel) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLabel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLabel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, ImageSourcePath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field As", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.As = append(m.As, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSourcePath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSourcePath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSourcePath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTagReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTagReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTagReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JenkinsPipelineBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JenkinsfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Jenkinsfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Jenkinsfile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if *m == nil { + *m = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + (*m)[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPProxy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPSProxy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NoProxy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretLocalReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretLocalReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretLocalReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scripts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scripts = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, BuildVolume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceControlUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceControlUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceControlUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitSourceRevision{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StageInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: StageInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StageInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StageName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, StepInfo{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: StepInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StepName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebHookTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebHookTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebHookTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEnv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEnv = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretReference == nil { + m.SecretReference = &SecretLocalReference{} + } + if err := m.SecretReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto new file mode 100644 index 0000000000000..92ae73426c82d --- /dev/null +++ 
b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -0,0 +1,1239 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.build.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/build/v1"; + +// BinaryBuildRequestOptions are the options required to fully specify a binary build request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BinaryBuildRequestOptions { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // asFile determines if the binary should be created as a file within the source rather than extracted as an archive + optional string asFile = 2; + + // revision.commit is the value identifying a specific commit + optional string revisionCommit = 3; + + // revision.message is the description of a specific commit + optional string revisionMessage = 4; + + // revision.authorName of the source control user + optional string revisionAuthorName = 5; + + // revision.authorEmail of the source control user + optional string revisionAuthorEmail = 6; + + // revision.committerName of the source control user + optional string revisionCommitterName = 7; + + // revision.committerEmail of the source control user + optional string revisionCommitterEmail = 8; +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +message BinaryBuildSource { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. + optional string asFile = 1; +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +message BitbucketWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Build { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is all the inputs used to execute the build. + optional BuildSpec spec = 2; + + // status is the current status of the build.
+ // +optional + optional BuildStatus status = 3; +} + +// BuildCondition describes the state of a build at a certain point. +message BuildCondition { + // type of build condition. + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created. +// +// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildConfig { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec holds all the input necessary to produce a new build, and the conditions when + // to trigger them. + optional BuildConfigSpec spec = 2; + + // status holds any relevant information about a build config + // +optional + optional BuildConfigStatus status = 3; +} + +// BuildConfigList is a collection of BuildConfigs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildConfigList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of build configs + repeated BuildConfig items = 2; +} + +// BuildConfigSpec describes when and how builds are created +message BuildConfigSpec { + // triggers determine how new Builds can be launched from a BuildConfig. If + // no triggers are defined, a new build can only occur as a result of an + // explicit client build creation. + // +optional + repeated BuildTriggerPolicy triggers = 1; + + // runPolicy describes how the new build created from this build + // configuration will be scheduled for execution. + // This is optional; if not specified, it defaults to "Serial".
+ optional string runPolicy = 2; + + // CommonSpec is the desired build specification + optional CommonSpec commonSpec = 3; + + // successfulBuildsHistoryLimit is the number of old successful builds to retain. + // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all successful builds are retained. + optional int32 successfulBuildsHistoryLimit = 4; + + // failedBuildsHistoryLimit is the number of old failed builds to retain. + // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all failed builds are retained. + optional int32 failedBuildsHistoryLimit = 5; +} + +// BuildConfigStatus contains current state of the build config object. +message BuildConfigStatus { + // lastVersion is used to inform about the number of the last triggered build. + optional int64 lastVersion = 1; + + // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry + // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + repeated ImageChangeTriggerStatus imageChangeTriggers = 2; +} + +// BuildList is a collection of Builds. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of builds + repeated Build items = 2; +} + +// BuildLog is the (unused) resource associated with the build log redirector +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildLog { +} + +// BuildLogOptions is the REST options for a build log +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildLogOptions { + // container for which to stream logs. Defaults to the only container if there is one container in the pod. + optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // previous returns previous build logs. Defaults to false. + optional bool previous = 3; + + // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // sinceTime is an RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified.
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // timestamps, if true, adds an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // tailLines, if set, is the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // limitBytes, if set, is the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // nowait if true causes the call to return immediately even if the build + // is not available yet. Otherwise the server will wait until the build has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // version of the build for which to view logs. + optional int64 version = 10; + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + optional bool insecureSkipTLSVerifyBackend = 11; +} + +// BuildOutput is input to a build strategy and describes the container image that the strategy +// should produce. +message BuildOutput { + // to defines an optional location to push the output of this build to. + // Kind must be one of 'ImageStreamTag' or 'DockerImage'. + // This value will be used to look up a container image repository to push to. + // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of + // the build unless Namespace is specified. + optional .k8s.io.api.core.v1.ObjectReference to = 1; + + // pushSecret is the name of a Secret that would be used for setting + // up the authentication for executing the Docker push to an authentication-enabled + // Docker registry (or Docker Hub). + optional .k8s.io.api.core.v1.LocalObjectReference pushSecret = 2; + + // imageLabels define a list of labels that are applied to the resulting image. If there + // are multiple labels with the same name then the last one in the list is used. + repeated ImageLabel imageLabels = 3; +} + +// A BuildPostCommitSpec holds a build post commit hook specification. The hook +// executes a command in a temporary container running the build output image, +// immediately after the last layer of the image is committed and before the +// image is pushed to a registry. The command is executed with the current +// working directory ($PWD) set to the image's WORKDIR. +// +// The build will be marked as failed if the hook execution fails. It will fail +// if the script or command returns a non-zero exit code, or if there is any +// other error related to starting the temporary container. +// +// There are five different ways to configure the hook. As an example, all forms +// below are equivalent and will execute `rake test --verbose`. +// +// 1.
Shell script: +// +// "postCommit": { +// "script": "rake test --verbose" +// } +// +// The above is a convenient form which is equivalent to: +// +// "postCommit": { +// "command": ["/bin/sh", "-ic"], +// "args": ["rake test --verbose"] +// } +// +// 2. A command as the image entrypoint: +// +// "postCommit": { +// "command": ["rake", "test", "--verbose"] +// } +// +// Command overrides the image entrypoint in the exec form, as documented in +// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. +// +// 3. Pass arguments to the default entrypoint: +// +// "postCommit": { +// "args": ["rake", "test", "--verbose"] +// } +// +// This form is only useful if the image entrypoint can handle arguments. +// +// 4. Shell script with arguments: +// +// "postCommit": { +// "script": "rake test $1", +// "args": ["--verbose"] +// } +// +// This form is useful if you need to pass arguments that would otherwise be +// hard to quote properly in the shell script. In the script, $0 will be +// "/bin/sh" and $1, $2, etc., are the positional arguments from Args. +// +// 5. Command with arguments: +// +// "postCommit": { +// "command": ["rake", "test"], +// "args": ["--verbose"] +// } +// +// This form is equivalent to appending the arguments to the Command slice. +// +// It is invalid to provide both Script and Command simultaneously. If none of +// the fields are specified, the hook is not executed. +message BuildPostCommitSpec { + // command is the command to run. It may not be specified with Script. + // This might be needed if the image doesn't have `/bin/sh`, or if you + // do not want to use a shell. In all other cases, using Script might be + // more convenient. + repeated string command = 1; + + // args is a list of arguments that are provided to either Command, + // Script or the container image's default entrypoint. The arguments are + // placed immediately after the command to be run. + repeated string args = 2; + + // script is a shell script to be run with `/bin/sh -ic`. It may not be + // specified with Command. Use Script when a shell script is appropriate + // to execute the post build hook, for example for running unit tests + // with `rake test`. If you need control over the image entrypoint, or + // if the image does not have `/bin/sh`, use Command and/or Args. + // The `-i` flag is needed to support CentOS and RHEL images that use + // Software Collections (SCL), in order to have the appropriate + // collections enabled in the shell. E.g., in the Ruby image, this is + // necessary to make `ruby`, `bundle` and other binaries available in + // the PATH. + optional string script = 3; +} + +// BuildRequest is the resource used to pass parameters to the build generator +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildRequest { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // revision is the information from the source for a specific repo snapshot. + optional SourceRevision revision = 2; + + // triggeredByImage is the Image that triggered this build. + optional .k8s.io.api.core.v1.ObjectReference triggeredByImage = 3; + + // from is the reference to the ImageStreamTag that triggered the build.
+ optional .k8s.io.api.core.v1.ObjectReference from = 4; + + // binary indicates a request to build from a binary provided to the builder + optional BinaryBuildSource binary = 5; + + // lastVersion (optional) is the LastVersion of the BuildConfig that was used + // to generate the build. If the BuildConfig in the generator doesn't match, a build will + // not be generated. + optional int64 lastVersion = 6; + + // env contains additional environment variables you want to pass into a builder container. + repeated .k8s.io.api.core.v1.EnvVar env = 7; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 8; + + // dockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 9; + + // sourceStrategyOptions contains additional source-strategy specific options for the build + optional SourceStrategyOptions sourceStrategyOptions = 10; +} + +// BuildSource is the SCM used for the build. +message BuildSource { + // type of build input to accept + // +k8s:conversion-gen=false + // +optional + optional string type = 1; + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and container image builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + optional BinaryBuildSource binary = 2; + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + optional string dockerfile = 3; + + // git contains optional information about git build source + optional GitBuildSource git = 4; + + // images describes a set of images to be used to provide source for the build + repeated ImageSource images = 5; + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows buildable sources to live in a directory other than the root of + // the repository. + optional string contextDir = 6; + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning a private repository. + // The secret contains valid credentials for the remote repository, where the + // data's key represents the authentication method to be used and the value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. + optional .k8s.io.api.core.v1.LocalObjectReference sourceSecret = 7; + + // secrets represents a list of secrets and their destinations that will + // be used only for the build.
+ repeated SecretBuildSource secrets = 8; + + // configMaps represents a list of configMaps and their destinations that will + // be used for the build. + repeated ConfigMapBuildSource configMaps = 9; +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +message BuildSpec { + // CommonSpec is the information that represents a build + optional CommonSpec commonSpec = 1; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 2; +} + +// BuildStatus contains the status of a build +message BuildStatus { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + optional string phase = 1; + + // cancelled describes if a cancel event was triggered for the build. + optional bool cancelled = 2; + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + optional string reason = 3; + + // message is a human-readable message indicating details about why the build has this status. + optional string message = 4; + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5; + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6; + + // duration contains a time.Duration object describing the build time. + optional int64 duration = 7; + + // outputDockerImageReference contains a reference to the container image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + optional string outputDockerImageReference = 8; + + // config is an ObjectReference to the BuildConfig this Build is based on. + optional .k8s.io.api.core.v1.ObjectReference config = 9; + + // output describes the container image the build has produced. + optional BuildStatusOutput output = 10; + + // stages contains details about each stage that occurs during the build + // including start time, duration (in milliseconds), and the steps that + // occurred within each stage. + repeated StageInfo stages = 11; + + // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + optional string logSnippet = 12; + + // conditions represents the latest available observations of a build's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated BuildCondition conditions = 13; +} + +// BuildStatusOutput contains the status of the built image. +message BuildStatusOutput { + // to describes the status of the built image being pushed to a registry. + optional BuildStatusOutputTo to = 1; +} + +// BuildStatusOutputTo describes the status of the built image with regard to +// the image registry to which it was supposed to be pushed. +message BuildStatusOutputTo { + // imageDigest is the digest of the built container image.
The digest uniquely + // identifies the image in the registry to which it was pushed. + // + // Please note that this field may not always be set even if the push + // completes successfully - e.g. when the registry returns no digest or + // returns it in a format that the builder doesn't understand. + optional string imageDigest = 1; +} + +// BuildStrategy contains the details of how to perform a build. +message BuildStrategy { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + // +optional + optional string type = 1; + + // dockerStrategy holds the parameters to the container image build strategy. + optional DockerBuildStrategy dockerStrategy = 2; + + // sourceStrategy holds the parameters to the Source build strategy. + optional SourceBuildStrategy sourceStrategy = 3; + + // customStrategy holds the parameters to the Custom build strategy + optional CustomBuildStrategy customStrategy = 4; + + // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // Deprecated: use OpenShift Pipelines + optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +message BuildTriggerCause { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change", etc. + optional string message = 1; + + // genericWebHook holds data about a build's generic webhook trigger. + optional GenericWebHookCause genericWebHook = 2; + + // githubWebHook represents data for a GitHub webhook that fired a + // specific build. + optional GitHubWebHookCause githubWebHook = 3; + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + optional ImageChangeCause imageChangeBuild = 4; + + // gitlabWebHook represents data for a GitLab webhook that fired a specific + // build. + optional GitLabWebHookCause gitlabWebHook = 5; + + // bitbucketWebHook represents data for a Bitbucket webhook that fired a + // specific build. + optional BitbucketWebHookCause bitbucketWebHook = 6; +} + +// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. +message BuildTriggerPolicy { + // type is the type of build trigger.
Valid values: + // + // - GitHub + // GitHubWebHookBuildTriggerType represents a trigger that launches builds on + // GitHub webhook invocations + // + // - Generic + // GenericWebHookBuildTriggerType represents a trigger that launches builds on + // generic webhook invocations + // + // - GitLab + // GitLabWebHookBuildTriggerType represents a trigger that launches builds on + // GitLab webhook invocations + // + // - Bitbucket + // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on + // Bitbucket webhook invocations + // + // - ImageChange + // ImageChangeBuildTriggerType represents a trigger that launches builds on + // availability of a new version of an image + // + // - ConfigChange + // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation + // WARNING: In the future the behavior will change to trigger a build on any config change + optional string type = 1; + + // github contains the parameters for a GitHub webhook type of trigger + optional WebHookTrigger github = 2; + + // generic contains the parameters for a Generic webhook type of trigger + optional WebHookTrigger generic = 3; + + // imageChange contains parameters for an ImageChange type of trigger + optional ImageChangeTrigger imageChange = 4; + + // gitlab contains the parameters for a GitLab webhook type of trigger + optional WebHookTrigger gitlab = 5; + + // bitbucket contains the parameters for a Bitbucket webhook type of + // trigger + optional WebHookTrigger bitbucket = 6; +} + +// BuildVolume describes a volume that is made available to build pods, +// such that it can be mounted into buildah's runtime environment. +// Only a subset of Kubernetes Volume sources are supported. +message BuildVolume { + // name is a unique identifier for this BuildVolume. + // It must conform to the Kubernetes DNS label standard and be unique within the pod. + // Names that collide with those added by the build controller will result in a + // failed build with an error message detailing which name caused the error. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +required + optional string name = 1; + + // source represents the location and type of the mounted volume. + // +required + optional BuildVolumeSource source = 2; + + // mounts represents the location of the volume in the image build container + // +required + // +listType=map + // +listMapKey=destinationPath + // +patchMergeKey=destinationPath + // +patchStrategy=merge + repeated BuildVolumeMount mounts = 3; +} + +// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment. +message BuildVolumeMount { + // destinationPath is the path within the buildah runtime environment at which the volume should be mounted. + // The transient mount within the build image and the backing volume will both be mounted read only. + // Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated + // by the builder process. + // Paths that collide with those added by the build controller will result in a + // failed build with an error message detailing which path caused the error. + optional string destinationPath = 1; +} + +// BuildVolumeSource represents the source of a volume to mount +// Only one of its supported types may be specified at any given time. +message BuildVolumeSource { + // type is the BuildVolumeSourceType for the volume source.
+ // Type must match the populated volume source. + // Valid types are: Secret, ConfigMap + optional string type = 1; + + // secret represents a Secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional .k8s.io.api.core.v1.SecretVolumeSource secret = 2; + + // configMap represents a ConfigMap that should populate this volume + // +optional + optional .k8s.io.api.core.v1.ConfigMapVolumeSource configMap = 3; + + // csi represents ephemeral storage provided by external CSI drivers which support this capability + // +optional + optional .k8s.io.api.core.v1.CSIVolumeSource csi = 4; +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +message CommonSpec { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + optional string serviceAccount = 1; + + // source describes the SCM in use. + optional BuildSource source = 2; + + // revision is the information from the source for a specific repo snapshot. + // This is optional. + optional SourceRevision revision = 3; + + // strategy defines how to perform a build. + optional BuildStrategy strategy = 4; + + // output describes the container image the Strategy should produce. + optional BuildOutput output = 5; + + // resources computes resource requirements to execute the build. + optional .k8s.io.api.core.v1.ResourceRequirements resources = 6; + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. + optional BuildPostCommitSpec postCommit = 7; + + // completionDeadlineSeconds is an optional duration in seconds, counted from + // the time when a build pod gets scheduled in the system, that the build may + // be active on a node before the system actively tries to terminate the + // build; value must be a positive integer + optional int64 completionDeadlineSeconds = 8; + + // nodeSelector is a selector which must be true for the build pod to fit on a node. + // If nil, it can be overridden by default build nodeselector values for the cluster. + // If set to an empty map or a map with any values, default build nodeselector values + // are ignored. + // +optional + optional OptionalNodeSelector nodeSelector = 9; + + // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in + // the cluster's proxy configuration, into the build. This lets processes within a build trust + // components signed by custom PKI certificate authorities, such as private artifact + // repositories and HTTPS proxies. + // + // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are + // managed by the build container, and any changes to this directory or its subdirectories (for + // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image. + optional bool mountTrustedCA = 10; +} + +// CommonWebHookCause factors out the identical format of these webhook +// causes into a struct so we can share it in the specific causes; it is too late for +// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. +message CommonWebHookCause { + // revision is the git source revision information of the trigger. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build.
+ optional string secret = 2; +} + +// ConfigMapBuildSource describes a configmap and its destination directory that will be +// used only at the build time. The content of the configmap referenced here will +// be copied into the destination directory instead of mounting. +message ConfigMapBuildSource { + // configMap is a reference to an existing configmap that you want to use in your + // build. + optional .k8s.io.api.core.v1.LocalObjectReference configMap = 1; + + // destinationDir is the directory where the files from the configmap should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + optional string destinationDir = 2; +} + +// CustomBuildStrategy defines input parameters specific to a Custom build. +message CustomBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which + // the container image should be pulled + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from the private Docker + // registries + optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container. + repeated .k8s.io.api.core.v1.EnvVar env = 3; + + // exposeDockerSocket will allow running Docker commands (and build container images) from + // inside the container. + // TODO: Allow admins to enforce 'false' for this option + optional bool exposeDockerSocket = 4; + + // forcePull describes if the controller should configure the build pod to always pull the images + // for the builder or only pull if they are not present locally + optional bool forcePull = 5; + + // secrets is a list of additional secrets that will be included in the build pod + repeated SecretSpec secrets = 6; + + // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder + optional string buildAPIVersion = 7; +} + +// DockerBuildStrategy defines input parameters specific to container image build. +message DockerBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides + // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds, + // this will replace the image in the last FROM directive of the file. + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from the private Docker + // registries + optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // noCache if set to true indicates that the container image build must be executed with the + // --no-cache=true flag + optional bool noCache = 3; + + // env contains additional environment variables you want to pass into a builder container. + repeated .k8s.io.api.core.v1.EnvVar env = 4; + + // forcePull describes if the builder should pull the images from the registry prior to building.
+ optional bool forcePull = 5; + + // dockerfilePath is the path of the Dockerfile that will be used to build the container image, + // relative to the root of the context (contextDir). + // Defaults to `Dockerfile` if unset. + optional string dockerfilePath = 6; + + // buildArgs contains build arguments that will be resolved in the Dockerfile. See + // https://docs.docker.com/engine/reference/builder/#/arg for more details. + // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field + // are ignored. + repeated .k8s.io.api.core.v1.EnvVar buildArgs = 7; + + // imageOptimizationPolicy describes what optimizations the system can use when building images + // to reduce the final size or time spent building the image. The default policy is 'None' which + // means the final build image will be equivalent to an image created by the container image build API. + // The experimental policy 'SkipLayers' will avoid committing new layers in between each + // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' + // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as + // 'SkipLayers' but simply warns if compatibility cannot be preserved. + optional string imageOptimizationPolicy = 8; + + // volumes is a list of input volumes that can be mounted into the build's runtime environment. + // Only a subset of Kubernetes Volume sources are supported by builds. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +listType=map + // +listMapKey=name + // +patchMergeKey=name + // +patchStrategy=merge + repeated BuildVolume volumes = 9; +} + +// DockerStrategyOptions contains extra strategy options for container image builds +message DockerStrategyOptions { + // buildArgs contains any build arguments that are to be passed to Docker. See + // https://docs.docker.com/engine/reference/builder/#/arg for more details + repeated .k8s.io.api.core.v1.EnvVar buildArgs = 1; + + // noCache overrides the docker-strategy noCache option in the build config + optional bool noCache = 2; +} + +// GenericWebHookCause holds information about a generic WebHook that +// triggered a build. +message GenericWebHookCause { + // revision is an optional field that stores the git source revision + // information of the generic webhook trigger when it is available. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GenericWebHookEvent is the payload expected for a generic webhook post +message GenericWebHookEvent { + // type is the type of source repository + // +k8s:conversion-gen=false + optional string type = 1; + + // git is the git information if the Type is BuildSourceGit + optional GitInfo git = 2; + + // env contains additional environment variables you want to pass into a builder container. + // ValueFrom is not supported. + repeated .k8s.io.api.core.v1.EnvVar env = 3; + + // dockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 4; +} + +// GitBuildSource defines the parameters of a Git SCM +message GitBuildSource { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + optional string uri = 1; + + // ref is the branch/tag/ref to build. + optional string ref = 2; + + // proxyConfig defines the proxies to use for the git clone operation.
Values + // not set here are inherited from cluster-wide build git proxy settings. + optional ProxyConfig proxyConfig = 3; +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +message GitHubWebHookCause { + // revision is the git revision information of the trigger. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GitInfo is the aggregated git information for a generic webhook post +message GitInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; + + // refs is a list of GitRefs for the provided repo - generally sent + // when used from a post-receive hook. This field is optional and is + // used when sending multiple refs. + repeated GitRefInfo refs = 3; +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +message GitLabWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// GitRefInfo is a single ref +message GitRefInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; +} + +// GitSourceRevision is the commit information from a git source for a build +message GitSourceRevision { + // commit is the commit hash identifying a specific commit + optional string commit = 1; + + // author is the author of a specific commit + optional SourceControlUser author = 2; + + // committer is the committer of a specific commit + optional SourceControlUser committer = 3; + + // message is the description of a specific commit + optional string message = 4; +} + +// ImageChangeCause contains information about the image that triggered a +// build +message ImageChangeCause { + // imageID is the ID of the image that triggered a new build. + optional string imageID = 1; + + // fromRef contains detailed information about an image that triggered a + // build. + optional .k8s.io.api.core.v1.ObjectReference fromRef = 2; +} + +// ImageChangeTrigger allows builds to be triggered when an ImageStream changes +message ImageChangeTrigger { + // lastTriggeredImageID is used internally by the ImageChangeController to save the last + // used image ID for a build + // This field is deprecated and will be removed in a future release. + // Deprecated + optional string lastTriggeredImageID = 1; + + // from is a reference to an ImageStreamTag that will trigger a build when updated + // It is optional. If no From is specified, the From image from the build strategy + // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in + // a build configuration. + optional .k8s.io.api.core.v1.ObjectReference from = 2; + + // paused is true if this trigger is temporarily disabled. Optional. + optional bool paused = 3; +} + +// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy +// specified in the BuildConfigSpec.Triggers struct. +message ImageChangeTriggerStatus { + // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. + // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started. + optional string lastTriggeredImageID = 1; + + // from is the ImageStreamTag that is the source of the trigger.
+ optional ImageStreamTagReference from = 2; + + // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. + // This field is only updated when this trigger specifically started a Build. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTriggerTime = 3; +} + +// ImageLabel represents a label applied to the resulting image. +message ImageLabel { + // name defines the name of the label. It must have non-zero length. + optional string name = 1; + + // value defines the literal value of the label. + optional string value = 2; +} + +// ImageSource is used to describe build source that will be extracted from an image or used during a +// multi-stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. +// A pull secret can be specified to pull the image from an external registry or override the default +// service account secret if pulling from the internal registry. Image sources can either be used to +// extract content from an image and place it into the build context along with the repository source, +// or used directly during a multi-stage container image build to allow content to be copied without overwriting +// the contents of the repository source (see the 'paths' and 'as' fields). +message ImageSource { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // as is a list of image names that this source will be used in place of during a multi-stage container image + // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image + // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile + // does not reference an image source, it is ignored. This field and paths may both be set, in which case + // the contents will be used twice. + // +optional + repeated string as = 4; + + // paths is a list of source and destination paths to copy from the image. This content will be copied + // into the build context prior to starting the build. If no paths are set, the build context will + // not be altered. + // +optional + repeated ImageSourcePath paths = 2; + + // pullSecret is a reference to a secret to be used to pull the image from a registry. + // If the image is pulled from the OpenShift registry, this field does not need to be set. + optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 3; +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +message ImageSourcePath { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. If the source path ends in /. then the content of + // the directory will be copied, but the directory itself will not be created at the + // destination. + optional string sourcePath = 1; + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. + optional string destinationDir = 2; +} + +// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.
+message ImageStreamTagReference { + // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located + optional string namespace = 1; + + // name is the name of the ImageStreamTag for an ImageChangeTrigger + optional string name = 2; +} + +// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. +// Deprecated: use OpenShift Pipelines +message JenkinsPipelineBuildStrategy { + // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is + // specified, this defaults to Jenkinsfile in the root of the specified contextDir. + optional string jenkinsfilePath = 1; + + // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + optional string jenkinsfile = 2; + + // env contains additional environment variables you want to pass into a build pipeline. + repeated .k8s.io.api.core.v1.EnvVar env = 3; +} + +// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNodeSelector { + // items, if empty, will result in an empty map + + map<string, string> items = 1; +} + +// ProxyConfig defines what proxies to use for an operation +message ProxyConfig { + // httpProxy is a proxy used to reach the git repository over http + optional string httpProxy = 3; + + // httpsProxy is a proxy used to reach the git repository over https + optional string httpsProxy = 4; + + // noProxy is the list of domains for which the proxy should not be used + optional string noProxy = 5; +} + +// SecretBuildSource describes a secret and its destination directory that will be +// used only at the build time. The content of the secret referenced here will +// be copied into the destination directory instead of mounting. +message SecretBuildSource { + // secret is a reference to an existing secret that you want to use in your + // build. + optional .k8s.io.api.core.v1.LocalObjectReference secret = 1; + + // destinationDir is the directory where the files from the secret should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. Later, when the script finishes, all files + // injected will be truncated to zero length. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + optional string destinationDir = 2; +} + +// SecretLocalReference contains information that points to the local secret being used +message SecretLocalReference { + // name is the name of the resource in the same namespace being referenced + optional string name = 1; +} + +// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point +message SecretSpec { + // secretSource is a reference to the secret + optional .k8s.io.api.core.v1.LocalObjectReference secretSource = 1; + + // mountPath is the path at which to mount the secret + optional string mountPath = 2; +} + +// SourceBuildStrategy defines input parameters specific to a Source build.
+message SourceBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which + // the container image should be pulled + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from the private Docker + // registries + optional .k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container. + repeated .k8s.io.api.core.v1.EnvVar env = 3; + + // scripts is the location of Source scripts + optional string scripts = 4; + + // incremental flag forces the Source build to do incremental builds if true. + optional bool incremental = 5; + + // forcePull describes if the builder should pull the images from the registry prior to building. + optional bool forcePull = 6; + + // volumes is a list of input volumes that can be mounted into the build's runtime environment. + // Only a subset of Kubernetes Volume sources are supported by builds. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +listType=map + // +listMapKey=name + // +patchMergeKey=name + // +patchStrategy=merge + repeated BuildVolume volumes = 9; +} + +// SourceControlUser defines the identity of a user of source control +message SourceControlUser { + // name of the source control user + optional string name = 1; + + // email of the source control user + optional string email = 2; +} + +// SourceRevision is the revision or commit information from the source for the build +message SourceRevision { + // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' + // +k8s:conversion-gen=false + optional string type = 1; + + // git contains information about git-based build source + optional GitSourceRevision git = 2; +} + +// SourceStrategyOptions contains extra strategy options for Source builds +message SourceStrategyOptions { + // incremental overrides the source-strategy incremental option in the build config + optional bool incremental = 1; +} + +// StageInfo contains details about a build stage. +message StageInfo { + // name is a unique identifier for each build stage that occurs. + optional string name = 1; + + // startTime is a timestamp representing the server time when this Stage started. + // It is represented in RFC3339 form and is in UTC. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // durationMilliseconds identifies how long the stage took + // to complete in milliseconds. + // Note: the duration of a stage can exceed the sum of the duration of the steps within + // the stage as not all actions are accounted for in explicit build steps. + optional int64 durationMilliseconds = 3; + + // steps contains details about each step that occurs during a build stage + // including start time and duration in milliseconds. + repeated StepInfo steps = 4; +} + +// StepInfo contains details about a build step. +message StepInfo { + // name is a unique identifier for each build step. + optional string name = 1; + + // startTime is a timestamp representing the server time when this Step started. + // It is represented in RFC3339 form and is in UTC. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // durationMilliseconds identifies how long the step took + // to complete in milliseconds.
+ optional int64 durationMilliseconds = 3; +} + +// WebHookTrigger is a trigger that gets invoked using a webhook type of post +message WebHookTrigger { + // secret used to validate requests. + // Deprecated: use SecretReference instead. + optional string secret = 1; + + // allowEnv determines whether the webhook can set environment variables; can only + // be set to true for GenericWebHook. + optional bool allowEnv = 2; + + // secretReference is a reference to a secret in the same namespace, + // containing the value to be validated when the webhook is invoked. + // The secret being referenced must contain a key named "WebHookSecretKey", the value + // of which will be checked against the value supplied in the webhook invocation. + optional SecretLocalReference secretReference = 3; +} + diff --git a/vendor/github.com/openshift/api/build/v1/legacy.go b/vendor/github.com/openshift/api/build/v1/legacy.go new file mode 100644 index 0000000000000..a74627d2cda08 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Build{}, + &BuildList{}, + &BuildConfig{}, + &BuildConfigList{}, + &BuildLog{}, + &BuildRequest{}, + &BuildLogOptions{}, + &BinaryBuildRequestOptions{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/build/v1/register.go b/vendor/github.com/openshift/api/build/v1/register.go new file mode 100644 index 0000000000000..16f68ea8cd6d8 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/register.go @@ -0,0 +1,47 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "build.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// addKnownTypes adds types to API group +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Build{}, + &BuildList{}, + &BuildConfig{}, + &BuildConfigList{}, + &BuildLog{}, + &BuildRequest{}, + &BuildLogOptions{}, + &BinaryBuildRequestOptions{}, + // This is needed for webhooks + &corev1.PodProxyOptions{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go new file mode 100644 index 
0000000000000..12bf67db1a2eb --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -0,0 +1,1469 @@ +package v1 + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:method=UpdateDetails,verb=update,subresource=details +// +genclient:method=Clone,verb=create,subresource=clone,input=BuildRequest +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Build struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is all the inputs used to execute the build. + Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current status of the build. + // +optional + Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +type BuildSpec struct { + // CommonSpec is the information that represents a build + CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,2,rep,name=triggeredBy"` +} + +// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNodeSelector map[string]string + +func (t OptionalNodeSelector) String() string { + return fmt.Sprintf("%v", map[string]string(t)) +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +type CommonSpec struct { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"` + + // source describes the SCM in use. + Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"` + + // revision is the information from the source for a specific repo snapshot. + // This is optional. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"` + + // strategy defines how to perform a build. + Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"` + + // output describes the container image the Strategy should produce. + Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"` + + // resources computes resource requirements to execute the build. + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"` + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. 
+ PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"`
+
+ // completionDeadlineSeconds is an optional duration in seconds, counted from
+ // the time when a build pod gets scheduled in the system, that the build may
+ // be active on a node before the system actively tries to terminate the
+ // build; value must be a positive integer
+ CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node.
+ // If nil, it can be overridden by default build nodeselector values for the cluster.
+ // If set to an empty map or a map with any values, default build nodeselector values
+ // are ignored.
+ // +optional
+ NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"`
+
+ // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in
+ // the cluster's proxy configuration, into the build. This lets processes within a build trust
+ // components signed by custom PKI certificate authorities, such as private artifact
+ // repositories and HTTPS proxies.
+ //
+ // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are
+ // managed by the build container, and any changes to this directory or its subdirectories (for
+ // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.
+ MountTrustedCA *bool `json:"mountTrustedCA,omitempty" protobuf:"varint,10,opt,name=mountTrustedCA"`
+}
+
+// BuildTriggerCause holds information about a triggered build. It is used for
+// displaying build trigger data for each build and build configuration in oc
+// describe. It is also used to describe which triggers led to the most recent
+// update in the build configuration.
+type BuildTriggerCause struct {
+ // message is used to store a human readable message for why the build was
+ // triggered. E.g.: "Manually triggered by user", "Configuration change", etc.
+ Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+
+ // genericWebHook holds data about a build's generic webhook trigger.
+ GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"`
+
+ // githubWebHook represents data for a GitHub webhook that fired a
+ // specific build.
+ GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"`
+
+ // imageChangeBuild stores information about an image change event
+ // that triggered a new build.
+ ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"`
+
+ // gitlabWebHook represents data for a GitLab webhook that fired a specific
+ // build.
+ GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"`
+
+ // bitbucketWebHook represents data for a Bitbucket webhook that fired a
+ // specific build.
+ BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"`
+}
+
+// GenericWebHookCause holds information about a generic WebHook that
+// triggered a build.
+type GenericWebHookCause struct {
+ // revision is an optional field that stores the git source revision
+ // information of the generic webhook trigger when it is available.
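As a minimal sketch, assuming the types in this package and a hypothetical clusterDefault value: the nil-versus-empty contract documented for nodeSelector above can be honored by a consumer of this API along these lines (the helper name is illustrative, not part of the vendored file).

func effectiveNodeSelector(spec CommonSpec, clusterDefault map[string]string) map[string]string {
	// A nil OptionalNodeSelector means "unset": the cluster's default build
	// node selector values may apply.
	if spec.NodeSelector == nil {
		return clusterDefault
	}
	// A non-nil selector, even an empty map, suppresses the cluster defaults.
	return map[string]string(spec.NodeSelector)
}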
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// GitHubWebHookCause has information about a GitHub webhook that triggered a
+// build.
+type GitHubWebHookCause struct {
+ // revision is the git revision information of the trigger.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// CommonWebHookCause factors out the identical format of these webhook
+// causes into a struct so we can share it in the specific causes; it is too late for
+// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
+type CommonWebHookCause struct {
+ // revision is the git source revision information of the trigger.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// GitLabWebHookCause has information about a GitLab webhook that triggered a
+// build.
+type GitLabWebHookCause struct {
+ CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
+}
+
+// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a
+// build.
+type BitbucketWebHookCause struct {
+ CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
+}
+
+// ImageChangeCause contains information about the image that triggered a
+// build.
+type ImageChangeCause struct {
+ // imageID is the ID of the image that triggered a new build.
+ ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"`
+
+ // fromRef contains detailed information about an image that triggered a
+ // build.
+ FromRef *corev1.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"`
+}
+
+// BuildStatus contains the status of a build.
+type BuildStatus struct {
+ // phase is the point in the build lifecycle. Possible values are
+ // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled".
+ Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"`
+
+ // cancelled describes if a cancel event was triggered for the build.
+ Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"`
+
+ // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
+ Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"`
+
+ // message is a human-readable message indicating details about why the build has this status.
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+
+ // startTimestamp is a timestamp representing the server time when this Build started
+ // running in a Pod.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"`
+
+ // completionTimestamp is a timestamp representing the server time when this Build was
+ // finished, whether that build failed or succeeded. It reflects the time at which
+ // the Pod running the Build terminated.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"`
+
+ // duration contains a time.Duration object describing build time.
+ Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"`
+
+ // outputDockerImageReference contains a reference to the container image that
+ // will be built by this build. Its value is computed from
+ // Build.Spec.Output.To, and should include the registry address, so that
+ // it can be used to push and pull the image.
+ OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"`
+
+ // config is an ObjectReference to the BuildConfig this Build is based on.
+ Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"`
+
+ // output describes the container image the build has produced.
+ Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"`
+
+ // stages contains details about each stage that occurs during the build
+ // including start time, duration (in milliseconds), and the steps that
+ // occurred within each stage.
+ Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"`
+
+ // logSnippet is the last few lines of the build log. This value is only set for builds that failed.
+ LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"`
+
+ // conditions represents the latest available observations of a build's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"`
+}
+
+// StageInfo contains details about a build stage.
+type StageInfo struct {
+ // name is a unique identifier for each build stage that occurs.
+ Name StageName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // startTime is a timestamp representing the server time when this Stage started.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // durationMilliseconds identifies how long the stage took
+ // to complete in milliseconds.
+ // Note: the duration of a stage can exceed the sum of the duration of the steps within
+ // the stage as not all actions are accounted for in explicit build steps.
+ DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+
+ // steps contains details about each step that occurs during a build stage
+ // including start time and duration in milliseconds.
+ Steps []StepInfo `json:"steps,omitempty" protobuf:"bytes,4,opt,name=steps"`
+}
+
+// StageName is the unique identifier for each build stage.
+type StageName string
+
+// Valid values for StageName
+const (
+ // StageFetchInputs fetches any inputs such as source code.
+ StageFetchInputs StageName = "FetchInputs"
+
+ // StagePullImages pulls any images that are needed such as
+ // base images or input images.
+ StagePullImages StageName = "PullImages"
+
+ // StageBuild performs the steps necessary to build the image.
+ StageBuild StageName = "Build"
+
+ // StagePostCommit executes any post commit steps.
+ StagePostCommit StageName = "PostCommit"
+
+ // StagePushImage pushes the image to the registry.
+ StagePushImage StageName = "PushImage"
+)
+
+// StepInfo contains details about a build step.
+type StepInfo struct {
+ // name is a unique identifier for each build step.
+ Name StepName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // startTime is a timestamp representing the server time when this Step started.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // durationMilliseconds identifies how long the step took
+ // to complete in milliseconds.
+ DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+}
+
+// StepName is a unique identifier for each build step.
+type StepName string
+
+// Valid values for StepName
+const (
+ // StepExecPostCommitHook executes the buildconfig's post commit hook.
+ StepExecPostCommitHook StepName = "RunPostCommitHook"
+
+ // StepFetchGitSource fetches source code for the build.
+ StepFetchGitSource StepName = "FetchGitSource"
+
+ // StepPullBaseImage pulls a base image for the build.
+ StepPullBaseImage StepName = "PullBaseImage"
+
+ // StepPullInputImage pulls an input image for the build.
+ StepPullInputImage StepName = "PullInputImage"
+
+ // StepPushImage pushes an image to the registry.
+ StepPushImage StepName = "PushImage"
+
+ // StepPushDockerImage pushes a container image to the registry.
+ StepPushDockerImage StepName = "PushDockerImage"
+
+ // StepDockerBuild performs the container image build.
+ StepDockerBuild StepName = "DockerBuild"
+)
+
+// BuildPhase represents the status of a build at a point in time.
+type BuildPhase string
+
+// Valid values for BuildPhase.
+const (
+ // BuildPhaseNew is automatically assigned to a newly created build.
+ BuildPhaseNew BuildPhase = "New"
+
+ // BuildPhasePending indicates that a pod name has been assigned and a build is
+ // about to start running.
+ BuildPhasePending BuildPhase = "Pending"
+
+ // BuildPhaseRunning indicates that a pod has been created and a build is running.
+ BuildPhaseRunning BuildPhase = "Running"
+
+ // BuildPhaseComplete indicates that a build has been successful.
+ BuildPhaseComplete BuildPhase = "Complete"
+
+ // BuildPhaseFailed indicates that a build has executed and failed.
+ BuildPhaseFailed BuildPhase = "Failed"
+
+ // BuildPhaseError indicates that an error prevented the build from executing.
+ BuildPhaseError BuildPhase = "Error"
+
+ // BuildPhaseCancelled indicates that a running/pending build was stopped from executing.
+ BuildPhaseCancelled BuildPhase = "Cancelled"
+)
+
+type BuildConditionType string
+
+// BuildCondition describes the state of a build at a certain point.
+type BuildCondition struct {
+ // type of build condition.
+ Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"`
+ // status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
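Because a stage's duration may exceed the sum of its step durations, as the note on StageInfo.DurationMilliseconds above explains, timing reports can surface the unaccounted remainder explicitly. A minimal sketch, assuming the StageInfo and StepInfo types in this package; the helper name is hypothetical:

func unaccountedMillis(stage StageInfo) int64 {
	// Sum the explicit step durations recorded for this stage.
	var stepTotal int64
	for _, step := range stage.Steps {
		stepTotal += step.DurationMilliseconds
	}
	// Whatever remains was spent on actions not captured as build steps.
	return stage.DurationMilliseconds - stepTotal
}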
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// StatusReason is a brief CamelCase string that describes a temporary or
+// permanent build error condition, meant for machine parsing and tidy display
+// in the CLI.
+type StatusReason string
+
+// BuildStatusOutput contains the status of the built image.
+type BuildStatusOutput struct {
+ // to describes the status of the built image being pushed to a registry.
+ To *BuildStatusOutputTo `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+}
+
+// BuildStatusOutputTo describes the status of the built image with regard to
+// the image registry to which it was supposed to be pushed.
+type BuildStatusOutputTo struct {
+ // imageDigest is the digest of the built container image. The digest uniquely
+ // identifies the image in the registry to which it was pushed.
+ //
+ // Please note that this field may not always be set even if the push
+ // completes successfully - e.g. when the registry returns no digest or
+ // returns it in a format that the builder doesn't understand.
+ ImageDigest string `json:"imageDigest,omitempty" protobuf:"bytes,1,opt,name=imageDigest"`
+}
+
+// BuildSourceType is the type of SCM used.
+type BuildSourceType string
+
+// Valid values for BuildSourceType.
+const (
+ // BuildSourceGit instructs a build to use a Git source control repository as the build input.
+ BuildSourceGit BuildSourceType = "Git"
+ // BuildSourceDockerfile uses a Dockerfile as the start of a build.
+ BuildSourceDockerfile BuildSourceType = "Dockerfile"
+ // BuildSourceBinary indicates the build will accept a Binary file as input.
+ BuildSourceBinary BuildSourceType = "Binary"
+ // BuildSourceImage indicates the build will accept an image as input.
+ BuildSourceImage BuildSourceType = "Image"
+ // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies).
+ BuildSourceNone BuildSourceType = "None"
+)
+
+// BuildSource is the SCM used for the build.
+type BuildSource struct {
+ // type of build input to accept
+ // +k8s:conversion-gen=false
+ // +optional
+ Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // binary builds accept a binary as their input. The binary is generally assumed to be a tar,
+ // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build
+ // context and an optional Dockerfile may be specified to override any Dockerfile in the
+ // build context. For Source builds, this is assumed to be an archive as described above. For
+ // Source and container image builds, if binary.asFile is set the build will receive a directory with
+ // a single file. contextDir may be used when an archive is provided. Custom builds will
+ // receive this binary as input on STDIN.
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"`
+
+ // dockerfile is the raw contents of a Dockerfile which should be built. When this option is
+ // specified, the FROM may be modified based on your strategy base image and additional ENV
+ // stanzas from your strategy environment will be added after the FROM, but before the rest
+ // of your Dockerfile stanzas.
The Dockerfile source type may be used with other options like
+ // git - in those cases the Git repo will have any innate Dockerfile replaced in the context
+ // dir.
+ Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"`
+
+ // git contains optional information about git build source
+ Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"`
+
+ // images describes a set of images to be used to provide source for the build
+ Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"`
+
+ // contextDir specifies the sub-directory where the source code for the application exists.
+ // This allows you to have buildable sources in a directory other than the root of the
+ // repository.
+ ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"`
+
+ // sourceSecret is the name of a Secret that would be used for setting
+ // up the authentication for cloning a private repository.
+ // The secret contains valid credentials for the remote repository, where the
+ // data's key represents the authentication method to be used and the value is
+ // the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
+ SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"`
+
+ // secrets represents a list of secrets and their destinations that will
+ // be used only for the build.
+ Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"`
+
+ // configMaps represents a list of configMaps and their destinations that will
+ // be used for the build.
+ ConfigMaps []ConfigMapBuildSource `json:"configMaps,omitempty" protobuf:"bytes,9,rep,name=configMaps"`
+}
+
+// ImageSource is used to describe build source that will be extracted from an image or used during a
+// multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used.
+// A pull secret can be specified to pull the image from an external registry or override the default
+// service account secret if pulling from the internal registry. Image sources can either be used to
+// extract content from an image and place it into the build context along with the repository source,
+// or used directly during a multi-stage container image build to allow content to be copied without overwriting
+// the contents of the repository source (see the 'paths' and 'as' fields).
+type ImageSource struct {
+ // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
+ // copy source from.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // A list of image names that this source will be used in place of during a multi-stage container image
+ // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image
+ // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile
+ // does not reference an image source it is ignored. This field and paths may both be set, in which case
+ // the contents will be used twice.
+ // +optional
+ As []string `json:"as,omitempty" protobuf:"bytes,4,rep,name=as"`
+
+ // paths is a list of source and destination paths to copy from the image. This content will be copied
+ // into the build context prior to starting the build. If no paths are set, the build context will
+ // not be altered.
+ // +optional
+ Paths []ImageSourcePath `json:"paths,omitempty" protobuf:"bytes,2,rep,name=paths"`
+
+ // pullSecret is a reference to a secret to be used to pull the image from a registry.
+ // If the image is pulled from the OpenShift registry, this field does not need to be set.
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"`
+}
+
+// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
+type ImageSourcePath struct {
+ // sourcePath is the absolute path of the file or directory inside the image to
+ // copy to the build directory. If the source path ends in /. then the content of
+ // the directory will be copied, but the directory itself will not be created at the
+ // destination.
+ SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"`
+
+ // destinationDir is the relative directory within the build directory
+ // where files copied from the image are placed.
+ DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// SecretBuildSource describes a secret and its destination directory that will be
+// used only at build time. The content of the secret referenced here will
+// be copied into the destination directory instead of mounting.
+type SecretBuildSource struct {
+ // secret is a reference to an existing secret that you want to use in your
+ // build.
+ Secret corev1.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"`
+
+ // destinationDir is the directory where the files from the secret should be
+ // available at build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs. Later, when the script finishes, all files
+ // injected will be truncated to zero length.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during the container image build.
+ DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// ConfigMapBuildSource describes a configmap and its destination directory that will be
+// used only at build time. The content of the configmap referenced here will
+// be copied into the destination directory instead of mounting.
+type ConfigMapBuildSource struct {
+ // configMap is a reference to an existing configmap that you want to use in your
+ // build.
+ ConfigMap corev1.LocalObjectReference `json:"configMap" protobuf:"bytes,1,opt,name=configMap"`
+
+ // destinationDir is the directory where the files from the configmap should be
+ // available at build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during the container image build.
+ DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies,
+// where the file will be extracted and used as the build source.
+type BinaryBuildSource struct {
+ // asFile indicates that the provided binary input should be considered a single file
+ // within the build input.
For example, specifying "webapp.war" would place the provided
+ // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build
+ // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source.
+ // The custom strategy receives this binary as standard input. This filename may not
+ // contain slashes or be '..' or '.'.
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"`
+}
+
+// SourceRevision is the revision or commit information from the source for the build
+type SourceRevision struct {
+ // type of the build source; may be one of 'Git', 'Dockerfile', 'Binary', 'Image', or 'None'
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // git contains information about the git-based build source
+ Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+}
+
+// GitSourceRevision is the commit information from a git source for a build
+type GitSourceRevision struct {
+ // commit is the commit hash identifying a specific commit
+ Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"`
+
+ // author is the author of a specific commit
+ Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"`
+
+ // committer is the committer of a specific commit
+ Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"`
+
+ // message is the description of a specific commit
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+}
+
+// ProxyConfig defines what proxies to use for an operation
+type ProxyConfig struct {
+ // httpProxy is a proxy used to reach the git repository over http
+ HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"`
+
+ // httpsProxy is a proxy used to reach the git repository over https
+ HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"`
+
+ // noProxy is the list of domains for which the proxy should not be used
+ NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"`
+}
+
+// GitBuildSource defines the parameters of a Git SCM
+type GitBuildSource struct {
+ // uri points to the source that will be built. The structure of the source
+ // will depend on the type of build to run
+ URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"`
+
+ // ref is the branch/tag/ref to build.
+ Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"`
+
+ // proxyConfig defines the proxies to use for the git clone operation. Values
+ // not set here are inherited from cluster-wide build git proxy settings.
+ ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"`
+}
+
+// SourceControlUser defines the identity of a user of source control
+type SourceControlUser struct {
+ // name of the source control user
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // email of the source control user
+ Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"`
+}
+
+// BuildStrategy contains the details of how to perform a build.
+type BuildStrategy struct {
+ // type is the kind of build strategy.
+ // +k8s:conversion-gen=false
+ // +optional
+ Type BuildStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"`
+
+ // dockerStrategy holds the parameters to the container image build strategy.
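A minimal sketch of how the source fields above compose, assuming the types in this package; the function name, repository URL, context directory, and secret name are placeholders:

func exampleGitSource() BuildSource {
	return BuildSource{
		// Build from a Git repository, checking out the "main" ref.
		Type: BuildSourceGit,
		Git:  &GitBuildSource{URI: "https://example.com/app.git", Ref: "main"},
		// Use a sub-directory of the repository as the build context.
		ContextDir: "services/api",
		// Credentials for cloning the private repository (e.g. ssh-privatekey).
		SourceSecret: &corev1.LocalObjectReference{Name: "repo-ssh-key"},
	}
}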
+ DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"`
+
+ // sourceStrategy holds the parameters to the Source build strategy.
+ SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"`
+
+ // customStrategy holds the parameters to the Custom build strategy.
+ CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
+
+ // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // Deprecated: use OpenShift Pipelines
+ JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
+}
+
+// BuildStrategyType describes a particular way of performing a build.
+type BuildStrategyType string
+
+// Valid values for BuildStrategyType.
+const (
+ // DockerBuildStrategyType performs builds using a Dockerfile.
+ DockerBuildStrategyType BuildStrategyType = "Docker"
+
+ // SourceBuildStrategyType performs builds using Source-to-Image with a Git repository
+ // and a builder image.
+ SourceBuildStrategyType BuildStrategyType = "Source"
+
+ // CustomBuildStrategyType performs builds using a custom builder container image.
+ CustomBuildStrategyType BuildStrategyType = "Custom"
+
+ // JenkinsPipelineBuildStrategyType indicates the build will run via Jenkins Pipeline.
+ JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline"
+)
+
+// CustomBuildStrategy defines input parameters specific to a Custom build.
+type CustomBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // exposeDockerSocket will allow running Docker commands (and build container images) from
+ // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option
+ ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"`
+
+ // forcePull describes if the controller should configure the build pod to always pull the images
+ // for the builder or only pull if it is not present locally
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // secrets is a list of additional secrets that will be included in the build pod
+ Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"`
+
+ // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+ BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"`
+}
+
+// ImageOptimizationPolicy describes what optimizations the builder can perform when building images.
+type ImageOptimizationPolicy string
+
+const (
+ // ImageOptimizationNone will generate a canonical container image as produced by the
+ // `container image build` command.
+ ImageOptimizationNone ImageOptimizationPolicy = "None"
+
+ // ImageOptimizationSkipLayers is an experimental policy and will avoid creating
+ // unique layers for each dockerfile line, resulting in smaller images and saving time
+ // during creation. Some Dockerfile syntax is not fully supported - content added to
+ // a VOLUME by an earlier layer may have incorrect uid, gid, and filesystem permissions.
+ // If an unsupported setting is detected, the build will fail.
+ ImageOptimizationSkipLayers ImageOptimizationPolicy = "SkipLayers"
+
+ // ImageOptimizationSkipLayersAndWarn is the same as SkipLayers, but will only
+ // warn to the build output instead of failing when unsupported syntax is detected. This
+ // policy is experimental.
+ ImageOptimizationSkipLayersAndWarn ImageOptimizationPolicy = "SkipLayersAndWarn"
+)
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+type DockerBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+ // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+ // this will replace the image in the last FROM directive of the file.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // noCache if set to true indicates that the container image build must be executed with the
+ // --no-cache=true flag
+ NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+ // relative to the root of the context (contextDir).
+ // Defaults to `Dockerfile` if unset.
+ DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"`
+
+ // buildArgs contains build arguments that will be resolved in the Dockerfile. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field
+ // are ignored.
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,7,rep,name=buildArgs"`
+
+ // imageOptimizationPolicy describes what optimizations the system can use when building images
+ // to reduce the final size or time spent building the image. The default policy is 'None' which
+ // means the final build image will be equivalent to an image created by the container image build API.
+ // The experimental policy 'SkipLayers' will avoid committing new layers in between each
+ // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+ // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+ // 'SkipLayers' but simply warns if compatibility cannot be preserved.
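A hedged sketch of a Docker strategy exercising the fields described above (the imageOptimizationPolicy field itself is declared just below); all values are placeholders:

func exampleDockerStrategy() *DockerBuildStrategy {
	// Opt into the experimental policy that warns instead of failing.
	policy := ImageOptimizationSkipLayersAndWarn
	return &DockerBuildStrategy{
		// Build from a non-default Dockerfile location within contextDir.
		DockerfilePath: "build/Dockerfile",
		// Only name/value are honored for build arguments; valueFrom is ignored.
		BuildArgs:               []corev1.EnvVar{{Name: "VERSION", Value: "1.2.3"}},
		ImageOptimizationPolicy: &policy,
	}
}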
+ ImageOptimizationPolicy *ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty" protobuf:"bytes,8,opt,name=imageOptimizationPolicy,casttype=ImageOptimizationPolicy"`
+
+ // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// SourceBuildStrategy defines input parameters specific to a Source build.
+type SourceBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // scripts is the location of Source scripts
+ Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"`
+
+ // incremental flag forces the Source build to do incremental builds if true.
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"`
+
+ // deprecated json field, do not reuse: runtimeImage
+ // +k8s:protobuf-deprecated=runtimeImage,7
+
+ // deprecated json field, do not reuse: runtimeArtifacts
+ // +k8s:protobuf-deprecated=runtimeArtifacts,8
+
+ // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+type JenkinsPipelineBuildStrategy struct {
+ // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is
+ // specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+ JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
+
+ // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
+
+ // env contains additional environment variables you want to pass into a build pipeline.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+}
+
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook
+// executes a command in a temporary container running the build output image,
+// immediately after the last layer of the image is committed and before the
+// image is pushed to a registry. The command is executed with the current
+// working directory ($PWD) set to the image's WORKDIR.
+//
+// The build will be marked as failed if the hook execution fails. It will fail
+// if the script or command returns a non-zero exit code, or if there is any
+// other error related to starting the temporary container.
+//
+// There are five different ways to configure the hook. As an example, all forms
+// below are equivalent and will execute `rake test --verbose`.
+//
+// 1. Shell script:
+//
+// "postCommit": {
+// "script": "rake test --verbose",
+// }
+//
+// The above is a convenient form which is equivalent to:
+//
+// "postCommit": {
+// "command": ["/bin/sh", "-ic"],
+// "args": ["rake test --verbose"]
+// }
+//
+// 2. A command as the image entrypoint:
+//
+// "postCommit": {
+// "command": ["rake", "test", "--verbose"]
+// }
+//
+// Command overrides the image entrypoint in the exec form, as documented in
+// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
+//
+// 3. Pass arguments to the default entrypoint:
+//
+// "postCommit": {
+// "args": ["rake", "test", "--verbose"]
+// }
+//
+// This form is only useful if the image entrypoint can handle arguments.
+//
+// 4. Shell script with arguments:
+//
+// "postCommit": {
+// "script": "rake test $1",
+// "args": ["--verbose"]
+// }
+//
+// This form is useful if you need to pass arguments that would otherwise be
+// hard to quote properly in the shell script. In the script, $0 will be
+// "/bin/sh" and $1, $2, etc, are the positional arguments from Args.
+//
+// 5. Command with arguments:
+//
+// "postCommit": {
+// "command": ["rake", "test"],
+// "args": ["--verbose"]
+// }
+//
+// This form is equivalent to appending the arguments to the Command slice.
+//
+// It is invalid to provide both Script and Command simultaneously. If none of
+// the fields are specified, the hook is not executed.
+type BuildPostCommitSpec struct {
+ // command is the command to run. It may not be specified with Script.
+ // This might be needed if the image doesn't have `/bin/sh`, or if you
+ // do not want to use a shell. In all other cases, using Script might be
+ // more convenient.
+ Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+ // args is a list of arguments that are provided to either Command,
+ // Script or the container image's default entrypoint. The arguments are
+ // placed immediately after the command to be run.
+ Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"`
+ // script is a shell script to be run with `/bin/sh -ic`. It may not be
+ // specified with Command. Use Script when a shell script is appropriate
+ // to execute the post build hook, for example for running unit tests
+ // with `rake test`. If you need control over the image entrypoint, or
+ // if the image does not have `/bin/sh`, use Command and/or Args.
+ // The `-i` flag is needed to support CentOS and RHEL images that use
+ // Software Collections (SCL), in order to have the appropriate
+ // collections enabled in the shell. E.g., in the Ruby image, this is
+ // necessary to make `ruby`, `bundle` and other binaries available in
+ // the PATH.
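A minimal sketch of form 5 above (a command with arguments, equivalent to `rake test --verbose`), assuming the BuildPostCommitSpec type in this package; the function name is hypothetical:

func examplePostCommit() BuildPostCommitSpec {
	return BuildPostCommitSpec{
		// The arguments are appended to the command, so this runs
		// `rake test --verbose` in the freshly committed image.
		Command: []string{"rake", "test"},
		Args:    []string{"--verbose"},
	}
}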
+ Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
+}
+
+// BuildOutput is input to a build strategy and describes the container image that the strategy
+// should produce.
+type BuildOutput struct {
+ // to defines an optional location to push the output of this build to.
+ // Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+ // This value will be used to look up a container image repository to push to.
+ // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+ // the build unless Namespace is specified.
+ To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+
+ // pushSecret is the name of a Secret that would be used for setting
+ // up the authentication for executing the Docker push to an authentication
+ // enabled Docker Registry (or Docker Hub).
+ PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
+
+ // imageLabels define a list of labels that are applied to the resulting image. If there
+ // are multiple labels with the same name then the last one in the list is used.
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"`
+}
+
+// ImageLabel represents a label applied to the resulting image.
+type ImageLabel struct {
+ // name defines the name of the label. It must have non-zero length.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // value defines the literal value of the label.
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=BuildRequest,result=Build
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.
+//
+// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec holds all the input necessary to produce a new build, and the conditions when
+ // to trigger them.
+ Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status holds any relevant information about a build config
+ // +optional
+ Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildConfigSpec describes when and how builds are created
+type BuildConfigSpec struct {
+
+ // triggers determine how new Builds can be launched from a BuildConfig. If
+ // no triggers are defined, a new build can only occur as a result of an
+ // explicit client build creation.
+ // +optional
+ Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"`
+
+ // runPolicy describes how the new build created from this build
+ // configuration will be scheduled for execution.
+ // This is optional, if not specified we default to "Serial".
+ RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
+
+ // CommonSpec is the desired build specification
+ CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
+
+ // successfulBuildsHistoryLimit is the number of old successful builds to retain.
+ // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all successful builds are retained.
+ SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty" protobuf:"varint,4,opt,name=successfulBuildsHistoryLimit"`
+
+ // failedBuildsHistoryLimit is the number of old failed builds to retain.
+ // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all failed builds are retained.
+ FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty" protobuf:"varint,5,opt,name=failedBuildsHistoryLimit"`
+}
+
+// BuildRunPolicy defines the behaviour of how the new builds are executed
+// from the existing build configuration.
+type BuildRunPolicy string
+
+const (
+ // BuildRunPolicyParallel schedules new builds immediately after they are
+ // created. Builds will be executed in parallel.
+ BuildRunPolicyParallel BuildRunPolicy = "Parallel"
+
+ // BuildRunPolicySerial schedules new builds to execute in a sequence as
+ // they are created. Every build gets queued up and will execute when the
+ // previous build completes. This is the default policy.
+ BuildRunPolicySerial BuildRunPolicy = "Serial"
+
+ // BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
+ // cancelling all the previously queued builds.
+ BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
+)
+
+// BuildConfigStatus contains current state of the build config object.
+type BuildConfigStatus struct {
+ // lastVersion is used to inform about the number of the last triggered build.
+ LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
+
+ // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+ // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
+ // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
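A minimal sketch that makes the documented defaults explicit, namely serial scheduling and retention of the five most recent successful and failed builds; the function name is hypothetical:

func exampleConfigSpec(common CommonSpec) BuildConfigSpec {
	keep := int32(5)
	return BuildConfigSpec{
		RunPolicy:                    BuildRunPolicySerial, // the default policy
		CommonSpec:                   common,
		SuccessfulBuildsHistoryLimit: &keep,
		FailedBuildsHistoryLimit:     &keep,
	}
}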
+ ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"` +} + +// SecretLocalReference contains information that points to the local secret being used +type SecretLocalReference struct { + // name is the name of the resource in the same namespace being referenced + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// WebHookTrigger is a trigger that gets invoked using a webhook type of post +type WebHookTrigger struct { + // secret used to validate requests. + // Deprecated: use SecretReference instead. + Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + + // allowEnv determines whether the webhook can set environment variables; can only + // be set to true for GenericWebHook. + AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"` + + // secretReference is a reference to a secret in the same namespace, + // containing the value to be validated when the webhook is invoked. + // The secret being referenced must contain a key named "WebHookSecretKey", the value + // of which will be checked against the value supplied in the webhook invocation. + SecretReference *SecretLocalReference `json:"secretReference,omitempty" protobuf:"bytes,3,opt,name=secretReference"` +} + +// ImageChangeTrigger allows builds to be triggered when an ImageStream changes +type ImageChangeTrigger struct { + // lastTriggeredImageID is used internally by the ImageChangeController to save last + // used image ID for build + // This field is deprecated and will be removed in a future release. + // Deprecated + LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"` + + // from is a reference to an ImageStreamTag that will trigger a build when updated + // It is optional. If no From is specified, the From image from the build strategy + // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in + // a build configuration. + From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"` + + // paused is true if this trigger is temporarily disabled. Optional. + Paused bool `json:"paused,omitempty" protobuf:"varint,3,opt,name=paused"` +} + +// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name. +type ImageStreamTagReference struct { + // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + + // name is the name of the ImageStreamTag for an ImageChangeTrigger + Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` +} + +// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy +// specified in the BuildConfigSpec.Triggers struct. +type ImageChangeTriggerStatus struct { + // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. + // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started. + LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"` + + // from is the ImageStreamTag that is the source of the trigger. 
+ From ImageStreamTagReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"` + + // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. + // This field is only updated when this trigger specifically started a Build. + LastTriggerTime metav1.Time `json:"lastTriggerTime,omitempty" protobuf:"bytes,3,opt,name=lastTriggerTime"` +} + +// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. +type BuildTriggerPolicy struct { + // type is the type of build trigger. Valid values: + // + // - GitHub + // GitHubWebHookBuildTriggerType represents a trigger that launches builds on + // GitHub webhook invocations + // + // - Generic + // GenericWebHookBuildTriggerType represents a trigger that launches builds on + // generic webhook invocations + // + // - GitLab + // GitLabWebHookBuildTriggerType represents a trigger that launches builds on + // GitLab webhook invocations + // + // - Bitbucket + // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on + // Bitbucket webhook invocations + // + // - ImageChange + // ImageChangeBuildTriggerType represents a trigger that launches builds on + // availability of a new version of an image + // + // - ConfigChange + // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation + // WARNING: In the future the behavior will change to trigger a build on any config change + Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"` + + // github contains the parameters for a GitHub webhook type of trigger + GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"` + + // generic contains the parameters for a Generic webhook type of trigger + GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"` + + // imageChange contains parameters for an ImageChange type of trigger + ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"` + + // GitLabWebHook contains the parameters for a GitLab webhook type of trigger + GitLabWebHook *WebHookTrigger `json:"gitlab,omitempty" protobuf:"bytes,5,opt,name=gitlab"` + + // BitbucketWebHook contains the parameters for a Bitbucket webhook type of + // trigger + BitbucketWebHook *WebHookTrigger `json:"bitbucket,omitempty" protobuf:"bytes,6,opt,name=bitbucket"` +} + +// BuildTriggerType refers to a specific BuildTriggerPolicy implementation. 
+type BuildTriggerType string + +const ( + // GitHubWebHookBuildTriggerType represents a trigger that launches builds on + // GitHub webhook invocations + GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub" + GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github" + + // GenericWebHookBuildTriggerType represents a trigger that launches builds on + // generic webhook invocations + GenericWebHookBuildTriggerType BuildTriggerType = "Generic" + GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic" + + // GitLabWebHookBuildTriggerType represents a trigger that launches builds on + // GitLab webhook invocations + GitLabWebHookBuildTriggerType BuildTriggerType = "GitLab" + + // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on + // Bitbucket webhook invocations + BitbucketWebHookBuildTriggerType BuildTriggerType = "Bitbucket" + + // ImageChangeBuildTriggerType represents a trigger that launches builds on + // availability of a new version of an image + ImageChangeBuildTriggerType BuildTriggerType = "ImageChange" + ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange" + + // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation + // WARNING: In the future the behavior will change to trigger a build on any config change + ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildList is a collection of Builds. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BuildList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of builds + Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildConfigList is a collection of BuildConfigs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BuildConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of build configs + Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// GenericWebHookEvent is the payload expected for a generic webhook post +type GenericWebHookEvent struct { + // type is the type of source repository + // +k8s:conversion-gen=false + Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` + + // git is the git information if the Type is BuildSourceGit + Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` + + // env contains additional environment variables you want to pass into a builder container. + // ValueFrom is not supported. 
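A minimal sketch of a GitHub trigger policy using the secretReference form, since the plain secret field is documented above as deprecated; the secret name is a placeholder:

func exampleGitHubTrigger() BuildTriggerPolicy {
	return BuildTriggerPolicy{
		Type: GitHubWebHookBuildTriggerType,
		GitHubWebHook: &WebHookTrigger{
			// The invocation is validated against the "WebHookSecretKey"
			// key of this secret, per the WebHookTrigger documentation.
			SecretReference: &SecretLocalReference{Name: "github-webhook-secret"},
		},
	}
}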
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+	// dockerStrategyOptions contains additional docker-strategy specific options for the build
+	DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"`
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+type GitInfo struct {
+	GitBuildSource    `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+	GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+
+	// refs is a list of GitRefs for the provided repo - generally sent
+	// when used from a post-receive hook. This field is optional and is
+	// used when sending multiple refs.
+	Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"`
+}
+
+// GitRefInfo is a single ref
+type GitRefInfo struct {
+	GitBuildSource    `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+	GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLog is the (unused) resource associated with the build log redirector
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLog struct {
+	metav1.TypeMeta `json:",inline"`
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+type DockerStrategyOptions struct {
+	// buildArgs contains any build arguments that are to be passed to Docker. See
+	// https://docs.docker.com/engine/reference/builder/#/arg for more details
+	BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,1,rep,name=buildArgs"`
+
+	// noCache overrides the docker-strategy noCache option in the build config
+	NoCache *bool `json:"noCache,omitempty" protobuf:"varint,2,opt,name=noCache"`
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+type SourceStrategyOptions struct {
+	// incremental overrides the source-strategy incremental option in the build config
+	Incremental *bool `json:"incremental,omitempty" protobuf:"varint,1,opt,name=incremental"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildRequest is the resource used to pass parameters to build generator
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildRequest struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// revision is the information from the source for a specific repo snapshot.
+	Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+	// triggeredByImage is the Image that triggered this build.
+	TriggeredByImage *corev1.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
+
+	// from is the reference to the ImageStreamTag that triggered the build.
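+	// For example (illustrative only, not normative), an image-change trigger would
+	// typically set kind "ImageStreamTag" and a name such as "example-image:latest".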
+	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+	// binary indicates a request to build from a binary provided to the builder
+	Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
+
+	// lastVersion (optional) is the LastVersion of the BuildConfig that was used
+	// to generate the build. If the BuildConfig in the generator doesn't match, a build will
+	// not be generated.
+	LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
+
+	// env contains additional environment variables you want to pass into a builder container.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
+
+	// triggeredBy describes which triggers started the most recent update to the
+	// build configuration and contains information about those triggers.
+	TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"`
+
+	// dockerStrategyOptions contains additional docker-strategy specific options for the build
+	DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"`
+
+	// sourceStrategyOptions contains additional source-strategy specific options for the build
+	SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BinaryBuildRequestOptions are the options required to fully specify a binary build request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BinaryBuildRequestOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+	AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
+
+	// TODO: Improve map[string][]string conversion so we can handle nested objects
+
+	// revision.commit is the value identifying a specific commit
+	Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
+
+	// revision.message is the description of a specific commit
+	Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
+
+	// revision.authorName of the source control user
+	AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
+
+	// revision.authorEmail of the source control user
+	AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
+
+	// revision.committerName of the source control user
+	CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
+
+	// revision.committerEmail of the source control user
+	CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLogOptions is the REST options for a build log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLogOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// container for which to stream logs. Defaults to only container if there is one container in the pod.
+	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+	// follow if true indicates that the build log should be streamed until
+	// the build terminates.
+	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+	// previous returns previous build logs. Defaults to false.
+	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+	// sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+	// sinceTime is an RFC3339 timestamp from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+	// timestamps, if true, adds an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+	// of log output. Defaults to false.
+	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+	// tailLines, if set, is the number of lines from the end of the logs to show. If not specified,
+	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
+	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+	// limitBytes, if set, is the number of bytes to read from the server before terminating the
+	// log output. This may not display a complete final line of logging, and may return
+	// slightly more or slightly less than the specified limit.
+	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+	// nowait if true causes the call to return immediately even if the build
+	// is not available yet. Otherwise the server will wait until the build has started.
+	// TODO: Fix the tag to 'noWait' in v2
+	NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
+
+	// version of the build for which to view logs.
+	Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
+
+	// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+	// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
+	// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+	// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
+	// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
+	// the actual log data coming from the real kubelet).
+	// +optional
+	InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,11,opt,name=insecureSkipTLSVerifyBackend"`
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+type SecretSpec struct {
+	// secretSource is a reference to the secret
+	SecretSource corev1.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"`
+
+	// mountPath is the path at which to mount the secret
+	MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
+}
+
+// BuildVolume describes a volume that is made available to build pods,
+// such that it can be mounted into buildah's runtime environment.
+// Only a subset of Kubernetes Volume sources are supported.
+type BuildVolume struct {
+	// name is a unique identifier for this BuildVolume.
+	// It must conform to the Kubernetes DNS label standard and be unique within the pod.
+	// Names that collide with those added by the build controller will result in a
+	// failed build with an error message detailing which name caused the error.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	// +required
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// source represents the location and type of the mounted volume.
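+	//
+	// An illustrative, hand-written (non-normative) volume definition using a Secret
+	// source; all names and paths below are placeholders:
+	//
+	//	volumes:
+	//	- name: example-certs
+	//	  source:
+	//	    type: Secret
+	//	    secret:
+	//	      secretName: example-cert-secret
+	//	  mounts:
+	//	  - destinationPath: /etc/pki/example-certs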
+	// +required
+	Source BuildVolumeSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+	// mounts represents the location of the volume in the image build container
+	// +required
+	// +listType=map
+	// +listMapKey=destinationPath
+	// +patchMergeKey=destinationPath
+	// +patchStrategy=merge
+	Mounts []BuildVolumeMount `json:"mounts" patchStrategy:"merge" patchMergeKey:"destinationPath" protobuf:"bytes,3,opt,name=mounts"`
+}
+
+// BuildVolumeSourceType represents a build volume source type
+type BuildVolumeSourceType string
+
+const (
+	// BuildVolumeSourceTypeSecret is the Secret build source volume type
+	BuildVolumeSourceTypeSecret BuildVolumeSourceType = "Secret"
+
+	// BuildVolumeSourceTypeConfigMap is the ConfigMap build source volume type
+	BuildVolumeSourceTypeConfigMap BuildVolumeSourceType = "ConfigMap"
+
+	// BuildVolumeSourceTypeCSI is the CSI build source volume type
+	BuildVolumeSourceTypeCSI BuildVolumeSourceType = "CSI"
+)
+
+// BuildVolumeSource represents the source of a volume to mount.
+// Only one of its supported types may be specified at any given time.
+type BuildVolumeSource struct {
+
+	// type is the BuildVolumeSourceType for the volume source.
+	// Type must match the populated volume source.
+	// Valid types are: Secret, ConfigMap, CSI
+	Type BuildVolumeSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildVolumeSourceType"`
+
+	// secret represents a Secret that should populate this volume.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	Secret *corev1.SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+	// configMap represents a ConfigMap that should populate this volume.
+	// +optional
+	ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+
+	// csi represents ephemeral storage provided by external CSI drivers which support this capability.
+	// +optional
+	CSI *corev1.CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,4,opt,name=csi"`
+}
+
+// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.
+type BuildVolumeMount struct {
+	// destinationPath is the path within the buildah runtime environment at which the volume should be mounted.
+	// The transient mount within the build image and the backing volume will both be mounted read only.
+	// Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated
+	// by the builder process.
+	// Paths that collide with those added by the build controller will result in a
+	// failed build with an error message detailing which path caused the error.
+	DestinationPath string `json:"destinationPath" protobuf:"bytes,1,opt,name=destinationPath"`
+}
diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000..d36b28c82bc41
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1610 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
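+//
+// Illustrative use of the generated helpers (a hand-written sketch, not part of the
+// generated output): DeepCopy allocates and returns an independent copy, while
+// DeepCopyInto writes into caller-provided storage. The field value is a placeholder:
+//
+//	src := &BinaryBuildRequestOptions{AsFile: "app.war"}
+//	dst := src.DeepCopy()   // independent copy; mutating dst leaves src intact
+//	var sink BinaryBuildRequestOptions
+//	src.DeepCopyInto(&sink) // copies into existing storage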
+func (in *BinaryBuildRequestOptions) DeepCopyInto(out *BinaryBuildRequestOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildRequestOptions. +func (in *BinaryBuildRequestOptions) DeepCopy() *BinaryBuildRequestOptions { + if in == nil { + return nil + } + out := new(BinaryBuildRequestOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BinaryBuildRequestOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BinaryBuildSource) DeepCopyInto(out *BinaryBuildSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildSource. +func (in *BinaryBuildSource) DeepCopy() *BinaryBuildSource { + if in == nil { + return nil + } + out := new(BinaryBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BitbucketWebHookCause) DeepCopyInto(out *BitbucketWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketWebHookCause. +func (in *BitbucketWebHookCause) DeepCopy() *BitbucketWebHookCause { + if in == nil { + return nil + } + out := new(BitbucketWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildCondition) DeepCopyInto(out *BuildCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildCondition. +func (in *BuildCondition) DeepCopy() *BuildCondition { + if in == nil { + return nil + } + out := new(BuildCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildConfig) DeepCopyInto(out *BuildConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfig. +func (in *BuildConfig) DeepCopy() *BuildConfig { + if in == nil { + return nil + } + out := new(BuildConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigList) DeepCopyInto(out *BuildConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BuildConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigList. +func (in *BuildConfigList) DeepCopy() *BuildConfigList { + if in == nil { + return nil + } + out := new(BuildConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigSpec) DeepCopyInto(out *BuildConfigSpec) { + *out = *in + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]BuildTriggerPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.SuccessfulBuildsHistoryLimit != nil { + in, out := &in.SuccessfulBuildsHistoryLimit, &out.SuccessfulBuildsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedBuildsHistoryLimit != nil { + in, out := &in.FailedBuildsHistoryLimit, &out.FailedBuildsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigSpec. +func (in *BuildConfigSpec) DeepCopy() *BuildConfigSpec { + if in == nil { + return nil + } + out := new(BuildConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigStatus) DeepCopyInto(out *BuildConfigStatus) { + *out = *in + if in.ImageChangeTriggers != nil { + in, out := &in.ImageChangeTriggers, &out.ImageChangeTriggers + *out = make([]ImageChangeTriggerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigStatus. +func (in *BuildConfigStatus) DeepCopy() *BuildConfigStatus { + if in == nil { + return nil + } + out := new(BuildConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLog) DeepCopyInto(out *BuildLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLog. +func (in *BuildLog) DeepCopy() *BuildLog { + if in == nil { + return nil + } + out := new(BuildLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLogOptions) DeepCopyInto(out *BuildLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLogOptions. +func (in *BuildLogOptions) DeepCopy() *BuildLogOptions { + if in == nil { + return nil + } + out := new(BuildLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOutput) DeepCopyInto(out *BuildOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOutput. 
+func (in *BuildOutput) DeepCopy() *BuildOutput { + if in == nil { + return nil + } + out := new(BuildOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPostCommitSpec) DeepCopyInto(out *BuildPostCommitSpec) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPostCommitSpec. +func (in *BuildPostCommitSpec) DeepCopy() *BuildPostCommitSpec { + if in == nil { + return nil + } + out := new(BuildPostCommitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildRequest) DeepCopyInto(out *BuildRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(corev1.ObjectReference) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.LastVersion != nil { + in, out := &in.LastVersion, &out.LastVersion + *out = new(int64) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategyOptions != nil { + in, out := &in.SourceStrategyOptions, &out.SourceStrategyOptions + *out = new(SourceStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRequest. +func (in *BuildRequest) DeepCopy() *BuildRequest { + if in == nil { + return nil + } + out := new(BuildRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildSource) DeepCopyInto(out *BuildSource) { + *out = *in + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(string) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitBuildSource) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretBuildSource, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ConfigMapBuildSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSource. +func (in *BuildSource) DeepCopy() *BuildSource { + if in == nil { + return nil + } + out := new(BuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatus) DeepCopyInto(out *BuildStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(corev1.ObjectReference) + **out = **in + } + in.Output.DeepCopyInto(&out.Output) + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]StageInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]BuildCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus. +func (in *BuildStatus) DeepCopy() *BuildStatus { + if in == nil { + return nil + } + out := new(BuildStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutput) DeepCopyInto(out *BuildStatusOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(BuildStatusOutputTo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutput. 
+func (in *BuildStatusOutput) DeepCopy() *BuildStatusOutput { + if in == nil { + return nil + } + out := new(BuildStatusOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutputTo) DeepCopyInto(out *BuildStatusOutputTo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutputTo. +func (in *BuildStatusOutputTo) DeepCopy() *BuildStatusOutputTo { + if in == nil { + return nil + } + out := new(BuildStatusOutputTo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStrategy) DeepCopyInto(out *BuildStrategy) { + *out = *in + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(DockerBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(SourceBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(CustomBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(JenkinsPipelineBuildStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStrategy. +func (in *BuildStrategy) DeepCopy() *BuildStrategy { + if in == nil { + return nil + } + out := new(BuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildTriggerCause) DeepCopyInto(out *BuildTriggerCause) { + *out = *in + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(GenericWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(GitHubWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.ImageChangeBuild != nil { + in, out := &in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(ImageChangeCause) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(GitLabWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(BitbucketWebHookCause) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerCause. +func (in *BuildTriggerCause) DeepCopy() *BuildTriggerCause { + if in == nil { + return nil + } + out := new(BuildTriggerCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildTriggerPolicy) DeepCopyInto(out *BuildTriggerPolicy) { + *out = *in + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(ImageChangeTrigger) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerPolicy. +func (in *BuildTriggerPolicy) DeepCopy() *BuildTriggerPolicy { + if in == nil { + return nil + } + out := new(BuildTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolume) DeepCopyInto(out *BuildVolume) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]BuildVolumeMount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolume. +func (in *BuildVolume) DeepCopy() *BuildVolume { + if in == nil { + return nil + } + out := new(BuildVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolumeMount) DeepCopyInto(out *BuildVolumeMount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeMount. +func (in *BuildVolumeMount) DeepCopy() *BuildVolumeMount { + if in == nil { + return nil + } + out := new(BuildVolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolumeSource) DeepCopyInto(out *BuildVolumeSource) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(corev1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeSource. +func (in *BuildVolumeSource) DeepCopy() *BuildVolumeSource { + if in == nil { + return nil + } + out := new(BuildVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + in.Strategy.DeepCopyInto(&out.Strategy) + in.Output.DeepCopyInto(&out.Output) + in.Resources.DeepCopyInto(&out.Resources) + in.PostCommit.DeepCopyInto(&out.PostCommit) + if in.CompletionDeadlineSeconds != nil { + in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MountTrustedCA != nil { + in, out := &in.MountTrustedCA, &out.MountTrustedCA + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec. +func (in *CommonSpec) DeepCopy() *CommonSpec { + if in == nil { + return nil + } + out := new(CommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonWebHookCause) DeepCopyInto(out *CommonWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonWebHookCause. +func (in *CommonWebHookCause) DeepCopy() *CommonWebHookCause { + if in == nil { + return nil + } + out := new(CommonWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapBuildSource) DeepCopyInto(out *ConfigMapBuildSource) { + *out = *in + out.ConfigMap = in.ConfigMap + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapBuildSource. +func (in *ConfigMapBuildSource) DeepCopy() *ConfigMapBuildSource { + if in == nil { + return nil + } + out := new(ConfigMapBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBuildStrategy) DeepCopyInto(out *CustomBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBuildStrategy. +func (in *CustomBuildStrategy) DeepCopy() *CustomBuildStrategy { + if in == nil { + return nil + } + out := new(CustomBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerBuildStrategy) DeepCopyInto(out *DockerBuildStrategy) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageOptimizationPolicy != nil { + in, out := &in.ImageOptimizationPolicy, &out.ImageOptimizationPolicy + *out = new(ImageOptimizationPolicy) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]BuildVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBuildStrategy. +func (in *DockerBuildStrategy) DeepCopy() *DockerBuildStrategy { + if in == nil { + return nil + } + out := new(DockerBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerStrategyOptions) DeepCopyInto(out *DockerStrategyOptions) { + *out = *in + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoCache != nil { + in, out := &in.NoCache, &out.NoCache + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStrategyOptions. +func (in *DockerStrategyOptions) DeepCopy() *DockerStrategyOptions { + if in == nil { + return nil + } + out := new(DockerStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookCause) DeepCopyInto(out *GenericWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookCause. +func (in *GenericWebHookCause) DeepCopy() *GenericWebHookCause { + if in == nil { + return nil + } + out := new(GenericWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookEvent) DeepCopyInto(out *GenericWebHookEvent) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitInfo) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookEvent. 
+func (in *GenericWebHookEvent) DeepCopy() *GenericWebHookEvent { + if in == nil { + return nil + } + out := new(GenericWebHookEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitBuildSource) DeepCopyInto(out *GitBuildSource) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitBuildSource. +func (in *GitBuildSource) DeepCopy() *GitBuildSource { + if in == nil { + return nil + } + out := new(GitBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubWebHookCause) DeepCopyInto(out *GitHubWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubWebHookCause. +func (in *GitHubWebHookCause) DeepCopy() *GitHubWebHookCause { + if in == nil { + return nil + } + out := new(GitHubWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitInfo) DeepCopyInto(out *GitInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + if in.Refs != nil { + in, out := &in.Refs, &out.Refs + *out = make([]GitRefInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitInfo. +func (in *GitInfo) DeepCopy() *GitInfo { + if in == nil { + return nil + } + out := new(GitInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabWebHookCause) DeepCopyInto(out *GitLabWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabWebHookCause. +func (in *GitLabWebHookCause) DeepCopy() *GitLabWebHookCause { + if in == nil { + return nil + } + out := new(GitLabWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRefInfo) DeepCopyInto(out *GitRefInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRefInfo. +func (in *GitRefInfo) DeepCopy() *GitRefInfo { + if in == nil { + return nil + } + out := new(GitRefInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitSourceRevision) DeepCopyInto(out *GitSourceRevision) { + *out = *in + out.Author = in.Author + out.Committer = in.Committer + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceRevision. 
+func (in *GitSourceRevision) DeepCopy() *GitSourceRevision { + if in == nil { + return nil + } + out := new(GitSourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeCause) DeepCopyInto(out *ImageChangeCause) { + *out = *in + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeCause. +func (in *ImageChangeCause) DeepCopy() *ImageChangeCause { + if in == nil { + return nil + } + out := new(ImageChangeCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTrigger) DeepCopyInto(out *ImageChangeTrigger) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTrigger. +func (in *ImageChangeTrigger) DeepCopy() *ImageChangeTrigger { + if in == nil { + return nil + } + out := new(ImageChangeTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTriggerStatus) DeepCopyInto(out *ImageChangeTriggerStatus) { + *out = *in + out.From = in.From + in.LastTriggerTime.DeepCopyInto(&out.LastTriggerTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTriggerStatus. +func (in *ImageChangeTriggerStatus) DeepCopy() *ImageChangeTriggerStatus { + if in == nil { + return nil + } + out := new(ImageChangeTriggerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSource) DeepCopyInto(out *ImageSource) { + *out = *in + out.From = in.From + if in.As != nil { + in, out := &in.As, &out.As + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ImageSourcePath, len(*in)) + copy(*out, *in) + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource. +func (in *ImageSource) DeepCopy() *ImageSource { + if in == nil { + return nil + } + out := new(ImageSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageSourcePath) DeepCopyInto(out *ImageSourcePath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSourcePath. +func (in *ImageSourcePath) DeepCopy() *ImageSourcePath { + if in == nil { + return nil + } + out := new(ImageSourcePath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagReference) DeepCopyInto(out *ImageStreamTagReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagReference. +func (in *ImageStreamTagReference) DeepCopy() *ImageStreamTagReference { + if in == nil { + return nil + } + out := new(ImageStreamTagReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JenkinsPipelineBuildStrategy) DeepCopyInto(out *JenkinsPipelineBuildStrategy) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineBuildStrategy. +func (in *JenkinsPipelineBuildStrategy) DeepCopy() *JenkinsPipelineBuildStrategy { + if in == nil { + return nil + } + out := new(JenkinsPipelineBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNodeSelector) DeepCopyInto(out *OptionalNodeSelector) { + { + in := &in + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNodeSelector. +func (in OptionalNodeSelector) DeepCopy() OptionalNodeSelector { + if in == nil { + return nil + } + out := new(OptionalNodeSelector) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBuildSource) DeepCopyInto(out *SecretBuildSource) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBuildSource. 
+func (in *SecretBuildSource) DeepCopy() *SecretBuildSource { + if in == nil { + return nil + } + out := new(SecretBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretLocalReference) DeepCopyInto(out *SecretLocalReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretLocalReference. +func (in *SecretLocalReference) DeepCopy() *SecretLocalReference { + if in == nil { + return nil + } + out := new(SecretLocalReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretSpec) DeepCopyInto(out *SecretSpec) { + *out = *in + out.SecretSource = in.SecretSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec. +func (in *SecretSpec) DeepCopy() *SecretSpec { + if in == nil { + return nil + } + out := new(SecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceBuildStrategy) DeepCopyInto(out *SourceBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]BuildVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceBuildStrategy. +func (in *SourceBuildStrategy) DeepCopy() *SourceBuildStrategy { + if in == nil { + return nil + } + out := new(SourceBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceControlUser) DeepCopyInto(out *SourceControlUser) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlUser. +func (in *SourceControlUser) DeepCopy() *SourceControlUser { + if in == nil { + return nil + } + out := new(SourceControlUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceRevision) DeepCopyInto(out *SourceRevision) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSourceRevision) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRevision. +func (in *SourceRevision) DeepCopy() *SourceRevision { + if in == nil { + return nil + } + out := new(SourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceStrategyOptions) DeepCopyInto(out *SourceStrategyOptions) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyOptions. +func (in *SourceStrategyOptions) DeepCopy() *SourceStrategyOptions { + if in == nil { + return nil + } + out := new(SourceStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInfo) DeepCopyInto(out *StageInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInfo. +func (in *StageInfo) DeepCopy() *StageInfo { + if in == nil { + return nil + } + out := new(StageInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepInfo) DeepCopyInto(out *StepInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepInfo. +func (in *StepInfo) DeepCopy() *StepInfo { + if in == nil { + return nil + } + out := new(StepInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHookTrigger) DeepCopyInto(out *WebHookTrigger) { + *out = *in + if in.SecretReference != nil { + in, out := &in.SecretReference, &out.SecretReference + *out = new(SecretLocalReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHookTrigger. +func (in *WebHookTrigger) DeepCopy() *WebHookTrigger { + if in == nil { + return nil + } + out := new(WebHookTrigger) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..1da7843537f6a --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,692 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BinaryBuildRequestOptions = map[string]string{ + "": "BinaryBuildRequestOptions are the options required to fully specify a binary build request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive", + "revision.commit": "revision.commit is the value identifying a specific commit", + "revision.message": "revision.message is the description of a specific commit", + "revision.authorName": "revision.authorName of the source control user", + "revision.authorEmail": "revision.authorEmail of the source control user", + "revision.committerName": "revision.committerName of the source control user", + "revision.committerEmail": "revision.committerEmail of the source control user", +} + +func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string { + return map_BinaryBuildRequestOptions +} + +var map_BinaryBuildSource = map[string]string{ + "": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.", + "asFile": "asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.", +} + +func (BinaryBuildSource) SwaggerDoc() map[string]string { + return map_BinaryBuildSource +} + +var map_BitbucketWebHookCause = map[string]string{ + "": "BitbucketWebHookCause has information about a Bitbucket webhook that triggered a build.", +} + +func (BitbucketWebHookCause) SwaggerDoc() map[string]string { + return map_BitbucketWebHookCause +} + +var map_Build = map[string]string{ + "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is all the inputs used to execute the build.", + "status": "status is the current status of the build.", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildCondition = map[string]string{ + "": "BuildCondition describes the state of a build at a certain point.", + "type": "type of build condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (BuildCondition) SwaggerDoc() map[string]string { + return map_BuildCondition +} + +var map_BuildConfig = map[string]string{ + "": "Build configurations define a build process for new container images. 
There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", + "status": "status holds any relevant information about a build config", +} + +func (BuildConfig) SwaggerDoc() map[string]string { + return map_BuildConfig +} + +var map_BuildConfigList = map[string]string{ + "": "BuildConfigList is a collection of BuildConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of build configs", +} + +func (BuildConfigList) SwaggerDoc() map[string]string { + return map_BuildConfigList +} + +var map_BuildConfigSpec = map[string]string{ + "": "BuildConfigSpec describes when and how builds are created", + "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", + "runPolicy": "runPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional; if not specified, we default to \"Serial\".", + "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", + "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.", +} + +func (BuildConfigSpec) SwaggerDoc() map[string]string { + return map_BuildConfigSpec +} + +var map_BuildConfigStatus = map[string]string{ + "": "BuildConfigStatus contains current state of the build config object.", + "lastVersion": "lastVersion is used to inform about the number of the last triggered build.", + "imageChangeTriggers": "imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. 
There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", +} + +func (BuildConfigStatus) SwaggerDoc() map[string]string { + return map_BuildConfigStatus +} + +var map_BuildList = map[string]string{ + "": "BuildList is a collection of Builds.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of builds", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildLog = map[string]string{ + "": "BuildLog is the (unused) resource associated with the build log redirector\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (BuildLog) SwaggerDoc() map[string]string { + return map_BuildLog +} + +var map_BuildLogOptions = map[string]string{ + "": "BuildLogOptions is the REST options for a build log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "container": "container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "previous returns previous build logs. Defaults to false.", + "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "nowait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "version": "version of the build for which to view logs.", + "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. 
If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", +} + +func (BuildLogOptions) SwaggerDoc() map[string]string { + return map_BuildLogOptions +} + +var map_BuildOutput = map[string]string{ + "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", + "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", + "pushSecret": "pushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", +} + +func (BuildOutput) SwaggerDoc() map[string]string { + return map_BuildOutput +} + +var map_BuildPostCommitSpec = map[string]string{ + "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command returns a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test --verbose\",\n\t }\n\n\tThe above is a convenient form which is equivalent to:\n\n\t \"postCommit\": {\n\t \"command\": [\"/bin/sh\", \"-ic\"],\n\t \"args\": [\"rake test --verbose\"]\n\t }\n\n2. A command as the image entrypoint:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n\tCommand overrides the image entrypoint in the exec form, as documented in\n\tDocker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n\t \"postCommit\": {\n\t\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t\t }\n\n\t This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test $1\",\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is useful if you need to pass arguments that would otherwise be\n\thard to quote properly in the shell script. In the script, $0 will be\n\t\"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\"],\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. 
If none of the fields are specified, the hook is not executed.", + "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.", + "args": "args is a list of arguments that are provided to either Command, Script or the container image's default entrypoint. The arguments are placed immediately after the command to be run.", + "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.", +} + +func (BuildPostCommitSpec) SwaggerDoc() map[string]string { + return map_BuildPostCommitSpec +} + +var map_BuildRequest = map[string]string{ + "": "BuildRequest is the resource used to pass parameters to build generator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "revision": "revision is the information from the source for a specific repo snapshot.", + "triggeredByImage": "triggeredByImage is the Image that triggered this build.", + "from": "from is the reference to the ImageStreamTag that triggered the build.", + "binary": "binary indicates a request to build from a binary provided to the builder", + "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.", + "env": "env contains additional environment variables you want to pass into a builder container.", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", + "sourceStrategyOptions": "sourceStrategyOptions contains additional source-strategy specific options for the build", +} + +func (BuildRequest) SwaggerDoc() map[string]string { + return map_BuildRequest +} + +var map_BuildSource = map[string]string{ + "": "BuildSource is the SCM used for the build.", + "type": "type of build input to accept", + "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For container image builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and container image builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. 
Custom builds will receive this binary as input on STDIN.", + "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.", + "git": "git contains optional information about git build source", + "images": "images describes a set of images to be used to provide source for the build", + "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows having buildable sources in a directory other than the root of the repository.", + "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning a private repository. The secret contains valid credentials for the remote repository, where the data's key represents the authentication method to be used and the value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.", + "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.", + "configMaps": "configMaps represents a list of configMaps and their destinations that will be used for the build.", +} + +func (BuildSource) SwaggerDoc() map[string]string { + return map_BuildSource +} + +var map_BuildSpec = map[string]string{ + "": "BuildSpec has the information to represent a build and also additional information about a build", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_BuildStatus = map[string]string{ + "": "BuildStatus contains the status of a build", + "phase": "phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".", + "cancelled": "cancelled describes if a cancel event was triggered for the build.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "message": "message is a human-readable message indicating details about why the build has this status.", + "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.", + "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.", + "duration": "duration contains a time.Duration object describing the build time.", + "outputDockerImageReference": "outputDockerImageReference contains a reference to the container image that will be built by this build. 
Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.", + "config": "config is an ObjectReference to the BuildConfig this Build is based on.", + "output": "output describes the container image the build has produced.", + "stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occurred within each stage.", + "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.", + "conditions": "conditions represents the latest available observations of a build's current state.", +} + +func (BuildStatus) SwaggerDoc() map[string]string { + return map_BuildStatus +} + +var map_BuildStatusOutput = map[string]string{ + "": "BuildStatusOutput contains the status of the built image.", + "to": "to describes the status of the built image being pushed to a registry.", +} + +func (BuildStatusOutput) SwaggerDoc() map[string]string { + return map_BuildStatusOutput +} + +var map_BuildStatusOutputTo = map[string]string{ + "": "BuildStatusOutputTo describes the status of the built image with regards to image registry to which it was supposed to be pushed.", + "imageDigest": "imageDigest is the digest of the built container image. The digest uniquely identifies the image in the registry to which it was pushed.\n\nPlease note that this field may not always be set even if the push completes successfully - e.g. when the registry returns no digest or returns it in a format that the builder doesn't understand.", +} + +func (BuildStatusOutputTo) SwaggerDoc() map[string]string { + return map_BuildStatusOutputTo +} + +var map_BuildStrategy = map[string]string{ + "": "BuildStrategy contains the details of how to perform a build.", + "type": "type is the kind of build strategy.", + "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", + "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", + "customStrategy": "customStrategy holds the parameters to the Custom build strategy", + "jenkinsPipelineStrategy": "jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", +} + +func (BuildStrategy) SwaggerDoc() map[string]string { + return map_BuildStrategy +} + +var map_BuildTriggerCause = map[string]string{ + "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", + "message": "message is used to store a human readable message for why the build was triggered. 
E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", + "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", + "githubWebHook": "githubWebHook represents data for a GitHub webhook that fired a specific build.", + "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", + "gitlabWebHook": "gitlabWebHook represents data for a GitLab webhook that fired a specific build.", + "bitbucketWebHook": "bitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", +} + +func (BuildTriggerCause) SwaggerDoc() map[string]string { + return map_BuildTriggerCause +} + +var map_BuildTriggerPolicy = map[string]string{ + "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.", + "type": "type is the type of build trigger. Valid values:\n\n- GitHub GitHubWebHookBuildTriggerType represents a trigger that launches builds on GitHub webhook invocations\n\n- Generic GenericWebHookBuildTriggerType represents a trigger that launches builds on generic webhook invocations\n\n- GitLab GitLabWebHookBuildTriggerType represents a trigger that launches builds on GitLab webhook invocations\n\n- Bitbucket BitbucketWebHookBuildTriggerType represents a trigger that launches builds on Bitbucket webhook invocations\n\n- ImageChange ImageChangeBuildTriggerType represents a trigger that launches builds on availability of a new version of an image\n\n- ConfigChange ConfigChangeBuildTriggerType will trigger a build on an initial build config creation WARNING: In the future the behavior will change to trigger a build on any config change", + "github": "github contains the parameters for a GitHub webhook type of trigger", + "generic": "generic contains the parameters for a Generic webhook type of trigger", + "imageChange": "imageChange contains parameters for an ImageChange type of trigger", + "gitlab": "GitLabWebHook contains the parameters for a GitLab webhook type of trigger", + "bitbucket": "BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger", +} + +func (BuildTriggerPolicy) SwaggerDoc() map[string]string { + return map_BuildTriggerPolicy +} + +var map_BuildVolume = map[string]string{ + "": "BuildVolume describes a volume that is made available to build pods, such that it can be mounted into buildah's runtime environment. Only a subset of Kubernetes Volume sources are supported.", + "name": "name is a unique identifier for this BuildVolume. It must conform to the Kubernetes DNS label standard and be unique within the pod. Names that collide with those added by the build controller will result in a failed build with an error message detailing which name caused the error. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "source": "source represents the location and type of the mounted volume.", + "mounts": "mounts represents the location of the volume in the image build container", +} + +func (BuildVolume) SwaggerDoc() map[string]string { + return map_BuildVolume +} + +var map_BuildVolumeMount = map[string]string{ + "": "BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.", + "destinationPath": "destinationPath is the path within the buildah runtime environment at which the volume should be mounted. The transient mount within the build image and the backing volume will both be mounted read only. Must be an absolute path, must not contain '..' 
or ':', and must not collide with a destination path generated by the builder process. Paths that collide with those added by the build controller will result in a failed build with an error message detailing which path caused the error.", +} + +func (BuildVolumeMount) SwaggerDoc() map[string]string { + return map_BuildVolumeMount +} + +var map_BuildVolumeSource = map[string]string{ + "": "BuildVolumeSource represents the source of a volume to mount. Only one of its supported types may be specified at any given time.", + "type": "type is the BuildVolumeSourceType for the volume source. Type must match the populated volume source. Valid types are: Secret, ConfigMap", + "secret": "secret represents a Secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "configMap": "configMap represents a ConfigMap that should populate this volume", + "csi": "csi represents ephemeral storage provided by external CSI drivers which support this capability", +} + +func (BuildVolumeSource) SwaggerDoc() map[string]string { + return map_BuildVolumeSource +} + +var map_CommonSpec = map[string]string{ + "": "CommonSpec encapsulates all the inputs necessary to represent a build.", + "serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount", + "source": "source describes the SCM in use.", + "revision": "revision is the information from the source for a specific repo snapshot. This is optional.", + "strategy": "strategy defines how to perform a build.", + "output": "output describes the container image the Strategy should produce.", + "resources": "resources computes resource requirements to execute the build.", + "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", + "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be a positive integer", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node. If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.", + "mountTrustedCA": "mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in the cluster's proxy configuration, into the build. 
This lets processes within a build trust components signed by custom PKI certificate authorities, such as private artifact repositories and HTTPS proxies.\n\nWhen this field is set to true, the contents of `/etc/pki/ca-trust` within the build are managed by the build container, and any changes to this directory or its subdirectories (for example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.", +} + +func (CommonSpec) SwaggerDoc() map[string]string { + return map_CommonSpec +} + +var map_CommonWebHookCause = map[string]string{ + "": "CommonWebHookCause factors out the identical format of these webhook causes into a struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", + "revision": "revision is the git source revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (CommonWebHookCause) SwaggerDoc() map[string]string { + return map_CommonWebHookCause +} + +var map_ConfigMapBuildSource = map[string]string{ + "": "ConfigMapBuildSource describes a configmap and its destination directory that will be used only at the build time. The content of the configmap referenced here will be copied into the destination directory instead of mounting.", + "configMap": "configMap is a reference to an existing configmap that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the configmap should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (ConfigMapBuildSource) SwaggerDoc() map[string]string { + return map_ConfigMapBuildSource +} + +var map_CustomBuildStrategy = map[string]string{ + "": "CustomBuildStrategy defines input parameters specific to Custom build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build container images) from inside the container.", + "forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if they are not present locally", + "secrets": "secrets is a list of additional secrets that will be included in the build pod", + "buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder", +} + +func (CustomBuildStrategy) SwaggerDoc() map[string]string { + return map_CustomBuildStrategy +} + +var map_DockerBuildStrategy = map[string]string{ + "": "DockerBuildStrategy defines input parameters specific to container image build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides the FROM image in the Dockerfile for the build. 
If the Dockerfile uses multi-stage builds, this will replace the image in the last FROM directive of the file.", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "noCache": "noCache if set to true indicates that the container image build must be executed with the --no-cache=true flag", + "env": "env contains additional environment variables you want to pass into a builder container.", + "forcePull": "forcePull describes if the builder should pull the images from the registry prior to building.", + "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the container image, relative to the root of the context (contextDir). Defaults to `Dockerfile` if unset.", + "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details. NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field are ignored.", + "imageOptimizationPolicy": "imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the container image build API. The experimental policy 'SkipLayers' will avoid committing new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy. An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved.", + "volumes": "volumes is a list of input volumes that can be mounted into the build's runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes", +} + +func (DockerBuildStrategy) SwaggerDoc() map[string]string { + return map_DockerBuildStrategy +} + +var map_DockerStrategyOptions = map[string]string{ + "": "DockerStrategyOptions contains extra strategy options for container image builds", + "buildArgs": "buildArgs contains any build arguments that are to be passed to Docker. See https://docs.docker.com/engine/reference/builder/#/arg for more details", + "noCache": "noCache overrides the docker-strategy noCache option in the build config", +} + +func (DockerStrategyOptions) SwaggerDoc() map[string]string { + return map_DockerStrategyOptions +} + +var map_GenericWebHookCause = map[string]string{ + "": "GenericWebHookCause holds information about a generic WebHook that triggered a build.", + "revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GenericWebHookCause) SwaggerDoc() map[string]string { + return map_GenericWebHookCause +} + +var map_GenericWebHookEvent = map[string]string{ + "": "GenericWebHookEvent is the payload expected for a generic webhook post", + "type": "type is the type of source repository", + "git": "git is the git information if the Type is BuildSourceGit", + "env": "env contains additional environment variables you want to pass into a builder container. 
ValueFrom is not supported.", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", +} + +func (GenericWebHookEvent) SwaggerDoc() map[string]string { + return map_GenericWebHookEvent +} + +var map_GitBuildSource = map[string]string{ + "": "GitBuildSource defines the parameters of a Git SCM", + "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run", + "ref": "ref is the branch/tag/ref to build.", +} + +func (GitBuildSource) SwaggerDoc() map[string]string { + return map_GitBuildSource +} + +var map_GitHubWebHookCause = map[string]string{ + "": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.", + "revision": "revision is the git revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GitHubWebHookCause) SwaggerDoc() map[string]string { + return map_GitHubWebHookCause +} + +var map_GitInfo = map[string]string{ + "": "GitInfo is the aggregated git information for a generic webhook post", + "refs": "refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", +} + +func (GitInfo) SwaggerDoc() map[string]string { + return map_GitInfo +} + +var map_GitLabWebHookCause = map[string]string{ + "": "GitLabWebHookCause has information about a GitLab webhook that triggered a build.", +} + +func (GitLabWebHookCause) SwaggerDoc() map[string]string { + return map_GitLabWebHookCause +} + +var map_GitRefInfo = map[string]string{ + "": "GitRefInfo is a single ref", +} + +func (GitRefInfo) SwaggerDoc() map[string]string { + return map_GitRefInfo +} + +var map_GitSourceRevision = map[string]string{ + "": "GitSourceRevision is the commit information from a git source for a build", + "commit": "commit is the commit hash identifying a specific commit", + "author": "author is the author of a specific commit", + "committer": "committer is the committer of a specific commit", + "message": "message is the description of a specific commit", +} + +func (GitSourceRevision) SwaggerDoc() map[string]string { + return map_GitSourceRevision +} + +var map_ImageChangeCause = map[string]string{ + "": "ImageChangeCause contains information about the image that triggered a build", + "imageID": "imageID is the ID of the image that triggered a new build.", + "fromRef": "fromRef contains detailed information about an image that triggered a build.", +} + +func (ImageChangeCause) SwaggerDoc() map[string]string { + return map_ImageChangeCause +} + +var map_ImageChangeTrigger = map[string]string{ + "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", + "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save the last used image ID for a build. This field is deprecated and will be removed in a future release. Deprecated", + "from": "from is a reference to an ImageStreamTag that will trigger a build when updated. It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.", + "paused": "paused is true if this trigger is temporarily disabled. 
Optional.", +} + +func (ImageChangeTrigger) SwaggerDoc() map[string]string { + return map_ImageChangeTrigger +} + +var map_ImageChangeTriggerStatus = map[string]string{ + "": "ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy specified in the BuildConfigSpec.Triggers struct.", + "lastTriggeredImageID": "lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.", + "from": "from is the ImageStreamTag that is the source of the trigger.", + "lastTriggerTime": "lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. This field is only updated when this trigger specifically started a Build.", +} + +func (ImageChangeTriggerStatus) SwaggerDoc() map[string]string { + return map_ImageChangeTriggerStatus +} + +var map_ImageLabel = map[string]string{ + "": "ImageLabel represents a label applied to the resulting image.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ImageSource = map[string]string{ + "": "ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields).", + "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.", + "as": "A list of image names that this source will be used in place of during a multi-stage container image build. For instance, a Dockerfile that uses \"COPY --from=nginx:latest\" will first check for an image source that has \"nginx:latest\" in this field before attempting to pull directly. If the Dockerfile does not reference an image source it is ignored. This field and paths may both be set, in which case the contents will be used twice.", + "paths": "paths is a list of source and destination paths to copy from the image. This content will be copied into the build context prior to starting the build. If no paths are set, the build context will not be altered.", + "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.", +} + +func (ImageSource) SwaggerDoc() map[string]string { + return map_ImageSource +} + +var map_ImageSourcePath = map[string]string{ + "": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.", + "sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. 
then the content of the directory will be copied, but the directory itself will not be created at the destination.", + "destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.", +} + +func (ImageSourcePath) SwaggerDoc() map[string]string { + return map_ImageSourcePath +} + +var map_ImageStreamTagReference = map[string]string{ + "": "ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.", + "namespace": "namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located", + "name": "name is the name of the ImageStreamTag for an ImageChangeTrigger", +} + +func (ImageStreamTagReference) SwaggerDoc() map[string]string { + return map_ImageStreamTagReference +} + +var map_JenkinsPipelineBuildStrategy = map[string]string{ + "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines", + "jenkinsfilePath": "jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "env": "env contains additional environment variables you want to pass into a build pipeline.", +} + +func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string { + return map_JenkinsPipelineBuildStrategy +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines what proxies to use for an operation", + "httpProxy": "httpProxy is a proxy used to reach the git repository over http", + "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", + "noProxy": "noProxy is the list of domains for which the proxy should not be used", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SecretBuildSource = map[string]string{ + "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.", + "secret": "secret is a reference to an existing secret that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length. 
For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (SecretBuildSource) SwaggerDoc() map[string]string { + return map_SecretBuildSource +} + +var map_SecretLocalReference = map[string]string{ + "": "SecretLocalReference contains information that points to the local secret being used", + "name": "name is the name of the resource in the same namespace being referenced", +} + +func (SecretLocalReference) SwaggerDoc() map[string]string { + return map_SecretLocalReference +} + +var map_SecretSpec = map[string]string{ + "": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point", + "secretSource": "secretSource is a reference to the secret", + "mountPath": "mountPath is the path at which to mount the secret", +} + +func (SecretSpec) SwaggerDoc() map[string]string { + return map_SecretSpec +} + +var map_SourceBuildStrategy = map[string]string{ + "": "SourceBuildStrategy defines input parameters specific to a Source build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "scripts": "scripts is the location of Source scripts", + "incremental": "incremental flag forces the Source build to do incremental builds if true.", + "forcePull": "forcePull describes if the builder should pull the images from the registry prior to building.", + "volumes": "volumes is a list of input volumes that can be mounted into the build's runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes", +} + +func (SourceBuildStrategy) SwaggerDoc() map[string]string { + return map_SourceBuildStrategy +} + +var map_SourceControlUser = map[string]string{ + "": "SourceControlUser defines the identity of a user of source control", + "name": "name of the source control user", + "email": "email of the source control user", +} + +func (SourceControlUser) SwaggerDoc() map[string]string { + return map_SourceControlUser +} + +var map_SourceRevision = map[string]string{ + "": "SourceRevision is the revision or commit information from the source for the build", + "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", + "git": "git contains information about git-based build source", +} + +func (SourceRevision) SwaggerDoc() map[string]string { + return map_SourceRevision +} + +var map_SourceStrategyOptions = map[string]string{ + "": "SourceStrategyOptions contains extra strategy options for Source builds", + "incremental": "incremental overrides the source-strategy incremental option in the build config", +} + +func (SourceStrategyOptions) SwaggerDoc() map[string]string { + return map_SourceStrategyOptions +} + +var map_StageInfo = map[string]string{ + "": "StageInfo contains details about a build stage.", + "name": "name is a unique identifier for each build stage that occurs.", + "startTime": "startTime is a timestamp representing the server time when this Stage started. 
It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the stage took to complete in milliseconds. Note: the duration of a stage can exceed the sum of the duration of the steps within the stage as not all actions are accounted for in explicit build steps.", + "steps": "steps contains details about each step that occurs during a build stage including start time and duration in milliseconds.", +} + +func (StageInfo) SwaggerDoc() map[string]string { + return map_StageInfo +} + +var map_StepInfo = map[string]string{ + "": "StepInfo contains details about a build step.", + "name": "name is a unique identifier for each build step.", + "startTime": "startTime is a timestamp representing the server time when this Step started. It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the step took to complete in milliseconds.", +} + +func (StepInfo) SwaggerDoc() map[string]string { + return map_StepInfo +} + +var map_WebHookTrigger = map[string]string{ + "": "WebHookTrigger is a trigger that gets invoked using a webhook type of post", + "secret": "secret used to validate requests. Deprecated: use SecretReference instead.", + "allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.", + "secretReference": "secretReference is a reference to a secret in the same namespace, containing the value to be validated when the webhook is invoked. The secret being referenced must contain a key named \"WebHookSecretKey\", the value of which will be checked against the value supplied in the webhook invocation.", +} + +func (WebHookTrigger) SwaggerDoc() map[string]string { + return map_WebHookTrigger +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/config/v1/Makefile b/vendor/github.com/openshift/api/config/v1/Makefile new file mode 100644 index 0000000000000..66bf636305aac --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go new file mode 100644 index 0000000000000..f994547583dad --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true + +// +kubebuilder:validation:Optional +// +groupName=config.openshift.io +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go new file mode 100644 index 0000000000000..61302592eab18 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -0,0 +1,78 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &APIServer{}, + &APIServerList{}, + &Authentication{}, + &AuthenticationList{}, + &Build{}, + &BuildList{}, + &ClusterOperator{}, + &ClusterOperatorList{}, + &ClusterVersion{}, + &ClusterVersionList{}, + &Console{}, + &ConsoleList{}, + &DNS{}, + &DNSList{}, + &FeatureGate{}, + &FeatureGateList{}, + &Image{}, + &ImageList{}, + &Infrastructure{}, + &InfrastructureList{}, + &Ingress{}, + &IngressList{}, + &Node{}, + &NodeList{}, + &Network{}, + &NetworkList{}, + &OAuth{}, + &OAuthList{}, + &OperatorHub{}, + &OperatorHubList{}, + &Project{}, + &ProjectList{}, + &Proxy{}, + &ProxyList{}, + &Scheduler{}, + &SchedulerList{}, + &ImageContentPolicy{}, + &ImageContentPolicyList{}, + &ImageDigestMirrorSet{}, + &ImageDigestMirrorSetList{}, + &ImageTagMirrorSet{}, + &ImageTagMirrorSetList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go new file mode 100644 index 0000000000000..6a5718c1db271 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/stringsource.go @@ -0,0 +1,31 @@ +package v1 + +import "encoding/json" + +// UnmarshalJSON implements the json.Unmarshaler interface. +// If the value is a string, it sets the Value field of the StringSource. +// Otherwise, it is unmarshaled into the StringSourceSpec struct +func (s *StringSource) UnmarshalJSON(value []byte) error { + // If we can unmarshal to a simple string, just set the value + var simpleValue string + if err := json.Unmarshal(value, &simpleValue); err == nil { + s.Value = simpleValue + return nil + } + + // Otherwise do the full struct unmarshal + return json.Unmarshal(value, &s.StringSourceSpec) +} + +// MarshalJSON implements the json.Marshaler interface. +// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. +// Otherwise, the StringSourceSpec struct is marshaled as a JSON object. 
+func (s *StringSource) MarshalJSON() ([]byte, error) { + // If we have only a cleartext value set, do a simple string marshal + if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { + return json.Marshal(s.Value) + } + + // Otherwise do the full struct marshal of the externalized bits + return json.Marshal(s.StringSourceSpec) +} diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go new file mode 100644 index 0000000000000..3e17ca0ccb67e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -0,0 +1,431 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ConfigMapFileReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapFileReference struct { + Name string `json:"name"` + // key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + Key string `json:"key,omitempty"` +} + +// ConfigMapNameReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapNameReference struct { + // name is the metadata.name of the referenced config map + // +required + Name string `json:"name"` +} + +// SecretNameReference references a secret in a specific namespace. +// The namespace must be specified at the point of use. +type SecretNameReference struct { + // name is the metadata.name of the referenced secret + // +required + Name string `json:"name"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if + // -1, there is no limit on requests. + RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // bindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic. + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // +optional + ClientCA string `json:"clientCA,omitempty"` + // namedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` + // minTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // cipherSuites contains an overridden list of ciphers for the server to support.
+ // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // certFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // names is a list of DNS names this certificate should be used to secure. + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names,omitempty"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// LeaderElection provides information to elect a leader +type LeaderElection struct { + // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case. + Disable bool `json:"disable,omitempty"` + // namespace indicates which namespace the resource is in + Namespace string `json:"namespace,omitempty"` + // name indicates what name to use for the resource + Name string `json:"name,omitempty"` + + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + // +nullable + LeaseDuration metav1.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + // +nullable + RenewDeadline metav1.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + // +nullable + RetryPeriod metav1.Duration `json:"retryPeriod"` +} + +// StringSource allows specifying a string inline, or externally via env var or file. +// When it contains only a string value, it marshals to a simple JSON string. +type StringSource struct { + // StringSourceSpec specifies the string value, or external location + StringSourceSpec `json:",inline"` +} + +// StringSourceSpec specifies a string value, or external location +type StringSourceSpec struct { + // value specifies the cleartext value, or an encrypted value if keyFile is specified. + Value string `json:"value"` + + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + Env string `json:"env"` + + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + File string `json:"file"` + + // keyFile references a file containing the key to use to decrypt the value.
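+// A hedged sketch of the external, encrypted form described above (the
+// file paths are hypothetical):
+//
+//	src := StringSource{StringSourceSpec: StringSourceSpec{
+//		File:    "/etc/secrets/bindPassword.encrypted",
+//		KeyFile: "/etc/secrets/decryption.key",
+//	}}
+//	_ = src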
+ KeyFile string `json:"keyFile"` +} + +// RemoteConnectionInfo holds information necessary for establishing a remote connection +type RemoteConnectionInfo struct { + // url is the remote URL to connect to + URL string `json:"url"` + // ca is the CA for verifying TLS connections + CA string `json:"ca"` + // CertInfo is the TLS client cert information to present + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type AdmissionConfig struct { + PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"` + + // enabledPlugins is a list of admission plugins that must be on in addition to the default list. + // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon + // and can result in performance penalties and unexpected behavior. + EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"` + + // disabledPlugins is a list of admission plugins that must be off. Putting something in this list + // is almost always a mistake and likely to result in cluster instability. + DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"` +} + +// AdmissionPluginConfig holds the necessary configuration options for admission plugins +type AdmissionPluginConfig struct { + // location is the path to a configuration file that contains the plugin's + // configuration + Location string `json:"location"` + + // configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + Configuration runtime.RawExtension `json:"configuration"` +} + +type LogFormatType string + +type WebHookModeType string + +const ( + // LogFormatLegacy saves events in 1-line text format. + LogFormatLegacy LogFormatType = "legacy" + // LogFormatJson saves events in structured JSON format. + LogFormatJson LogFormatType = "json" + + // WebHookModeBatch indicates that the webhook should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + WebHookModeBatch WebHookModeType = "batch" + // WebHookModeBlocking causes the webhook to block on every attempt to process + // a set of events. This causes requests to the API server to wait for a + // round trip to the external audit service before sending a response. + WebHookModeBlocking WebHookModeType = "blocking" +) + +// AuditConfig holds configuration for the audit capabilities +type AuditConfig struct { + // If this flag is set, the audit log will be printed in the logs. + // The log contains the method, user, and requested URL. + Enabled bool `json:"enabled"` + // All requests coming to the apiserver will be logged to this file. + AuditFilePath string `json:"auditFilePath"` + // Maximum number of days to retain old log files based on the timestamp encoded in their filename. + MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"` + // Maximum number of old log files to retain. + MaximumRetainedFiles int32 `json:"maximumRetainedFiles"` + // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. + MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` + + // policyFile is a path to the file that defines the audit policy configuration.
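+// A minimal sketch of the admission plugin wiring defined above (the plugin
+// name and payload are hypothetical; an embedded configuration takes
+// precedence over a file location):
+//
+//	cfg := AdmissionConfig{
+//		PluginConfig: map[string]AdmissionPluginConfig{
+//			"example.openshift.io/SomePlugin": {
+//				Configuration: runtime.RawExtension{Raw: []byte(`{"enabled":true}`)},
+//			},
+//		},
+//	}
+//	_ = cfg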
+ PolicyFile string `json:"policyFile"` + // policyConfiguration is an embedded policy configuration object to be used + // as the audit policy configuration. If present, it will be used instead of + // the path to the policy file. + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` + + // Format of saved audits (legacy or json). + LogFormat LogFormatType `json:"logFormat"` + + // Path to a .kubeconfig formatted file that defines the audit webhook configuration. + WebHookKubeConfig string `json:"webHookKubeConfig"` + // Strategy for sending audit events (blocking or batch). + WebHookMode WebHookModeType `json:"webHookMode"` +} + +// EtcdConnectionInfo holds information necessary for connecting to an etcd server +type EtcdConnectionInfo struct { + // urls are the URLs for etcd + URLs []string `json:"urls,omitempty"` + // ca is a file containing trusted roots for the etcd server certificates + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to etcd + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type EtcdStorageConfig struct { + EtcdConnectionInfo `json:",inline"` + + // storagePrefix is the path within etcd that the OpenShift resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. + StoragePrefix string `json:"storagePrefix"` +} + +// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd +type GenericAPIServerConfig struct { + // servingInfo describes how to start serving + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // corsAllowedOrigins lists origins allowed for CORS requests. + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + + // auditConfig describes how to configure audit information + AuditConfig AuditConfig `json:"auditConfig"` + + // storageConfig contains information about how to use etcd. + StorageConfig EtcdStorageConfig `json:"storageConfig"` + + // admissionConfig holds information about how to configure admission. + AdmissionConfig AdmissionConfig `json:"admission"` + + KubeClientConfig KubeClientConfig `json:"kubeClientConfig"` +} + +type KubeClientConfig struct { + // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster config. + KubeConfig string `json:"kubeConfig"` + + // connectionOverrides specifies client overrides for system components to loop back to this master. + ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"` +} + +type ClientConnectionOverrides struct { + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // contentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + + // qps controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // burst allows extra queries to accumulate when a client is exceeding its rate.
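+// A hedged sketch of the client rate-limit knobs above (the numbers are
+// illustrative, not recommended values):
+//
+//	overrides := ClientConnectionOverrides{
+//		AcceptContentTypes: "application/json",
+//		ContentType:        "application/json",
+//		QPS:                50,  // sustained queries per second
+//		Burst:              100, // short bursts above QPS
+//	}
+//	_ = overrides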
+ Burst int32 `json:"burst"` +} + +// GenericControllerConfig provides information to configure a controller +type GenericControllerConfig struct { + // servingInfo is the HTTP serving information for the controller's endpoints + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // leaderElection provides information to elect a leader. Only override this if you have a specific need. + LeaderElection LeaderElection `json:"leaderElection"` + + // authentication allows configuration of authentication for the endpoints + Authentication DelegatedAuthentication `json:"authentication"` + // authorization allows configuration of authorization for the endpoints + Authorization DelegatedAuthorization `json:"authorization"` +} + +// DelegatedAuthentication allows authentication to be disabled. +type DelegatedAuthentication struct { + // disabled indicates that authentication should be disabled. By default it will use delegated authentication. + Disabled bool `json:"disabled,omitempty"` +} + +// DelegatedAuthorization allows authorization to be disabled. +type DelegatedAuthorization struct { + // disabled indicates that authorization should be disabled. By default it will use delegated authorization. + Disabled bool `json:"disabled,omitempty"` +} + +type RequiredHSTSPolicy struct { + // namespaceSelector specifies a label selector such that the policy applies only to those routes that + // are in namespaces with labels that match the selector, and are in one of the DomainPatterns. + // Defaults to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // domainPatterns is a list of domains for which the desired HSTS annotations are required. + // If domainPatterns is specified and a route is created with a spec.host matching one of the domains, + // the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. + // + // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. + // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. + // +kubebuilder:validation:MinItems=1 + // +required + DomainPatterns []string `json:"domainPatterns"` + + // maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. + // If set to 0, it negates the effect, and hosts are removed as HSTS hosts. + // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. + // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS + // policy will eventually expire on that client. + MaxAge MaxAgePolicy `json:"maxAge"` + + // preloadPolicy directs the client to include hosts in its host preload list so that + // it never needs to do an initial load to get the HSTS header (note that this is not defined + // in RFC 6797 and is therefore client implementation-dependent). + // +optional + PreloadPolicy PreloadPolicy `json:"preloadPolicy,omitempty"` + + // includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's + // domain name.
Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: + // - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com + // - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com + // - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com + // - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com + // +optional + IncludeSubDomainsPolicy IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` +} + +// MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy +type MaxAgePolicy struct { + // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age. + // This value can be left unspecified, in which case no upper limit is enforced. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2147483647 + LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` + + // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age. + // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary + // tool for administrators to quickly correct mistakes. + // This value can be left unspecified, in which case no lower limit is enforced. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2147483647 + SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` +} + +// PreloadPolicy contains a value for specifying a compliant HSTS preload policy for the enclosing RequiredHSTSPolicy +// +kubebuilder:validation:Enum=RequirePreload;RequireNoPreload;NoOpinion +type PreloadPolicy string + +const ( + // RequirePreloadPolicy means HSTS "preload" is required by the RequiredHSTSPolicy + RequirePreloadPolicy PreloadPolicy = "RequirePreload" + + // RequireNoPreloadPolicy means HSTS "preload" is forbidden by the RequiredHSTSPolicy + RequireNoPreloadPolicy PreloadPolicy = "RequireNoPreload" + + // NoOpinionPreloadPolicy means HSTS "preload" doesn't matter to the RequiredHSTSPolicy + NoOpinionPreloadPolicy PreloadPolicy = "NoOpinion" +) + +// IncludeSubDomainsPolicy contains a value for specifying a compliant HSTS includeSubdomains policy +// for the enclosing RequiredHSTSPolicy +// +kubebuilder:validation:Enum=RequireIncludeSubDomains;RequireNoIncludeSubDomains;NoOpinion +type IncludeSubDomainsPolicy string + +const ( + // RequireIncludeSubDomains means HSTS "includeSubDomains" is required by the RequiredHSTSPolicy + RequireIncludeSubDomains IncludeSubDomainsPolicy = "RequireIncludeSubDomains" + + // RequireNoIncludeSubDomains means HSTS "includeSubDomains" is forbidden by the RequiredHSTSPolicy + RequireNoIncludeSubDomains IncludeSubDomainsPolicy = "RequireNoIncludeSubDomains" + + // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy + NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion" +) + +// IBMCloudServiceName contains a value specifying the name of an IBM Cloud Service, +// which is used by MAPI, CIRO, CIO, Installer, etc. +// +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC +type IBMCloudServiceName string + +const ( + // IBMCloudServiceCIS is the name for IBM Cloud CIS. + IBMCloudServiceCIS IBMCloudServiceName = "CIS" + // IBMCloudServiceCOS is the name for IBM Cloud COS.
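+// A hedged sketch combining the HSTS policy types above (the domains and
+// bounds are hypothetical):
+//
+//	var largest, smallest int32 = 31536000, 1
+//	policy := RequiredHSTSPolicy{
+//		DomainPatterns:          []string{"foo.com", "*.foo.com"},
+//		MaxAge:                  MaxAgePolicy{LargestMaxAge: &largest, SmallestMaxAge: &smallest},
+//		PreloadPolicy:           RequirePreloadPolicy,
+//		IncludeSubDomainsPolicy: RequireIncludeSubDomains,
+//	}
+//	_ = policy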
+ IBMCloudServiceCOS IBMCloudServiceName = "COS" + // IBMCloudServiceCOSConfig is the name for IBM Cloud COS Config service. + IBMCloudServiceCOSConfig IBMCloudServiceName = "COSConfig" + // IBMCloudServiceDNSServices is the name for IBM Cloud DNS Services. + IBMCloudServiceDNSServices IBMCloudServiceName = "DNSServices" + // IBMCloudServiceGlobalCatalog is the name for IBM Cloud Global Catalog service. + IBMCloudServiceGlobalCatalog IBMCloudServiceName = "GlobalCatalog" + // IBMCloudServiceGlobalSearch is the name for IBM Cloud Global Search. + IBMCloudServiceGlobalSearch IBMCloudServiceName = "GlobalSearch" + // IBMCloudServiceGlobalTagging is the name for IBM Cloud Global Tagging. + IBMCloudServiceGlobalTagging IBMCloudServiceName = "GlobalTagging" + // IBMCloudServiceHyperProtect is the name for IBM Cloud Hyper Protect. + IBMCloudServiceHyperProtect IBMCloudServiceName = "HyperProtect" + // IBMCloudServiceIAM is the name for IBM Cloud IAM. + IBMCloudServiceIAM IBMCloudServiceName = "IAM" + // IBMCloudServiceKeyProtect is the name for IBM Cloud Key Protect. + IBMCloudServiceKeyProtect IBMCloudServiceName = "KeyProtect" + // IBMCloudServiceResourceController is the name for IBM Cloud Resource Controller. + IBMCloudServiceResourceController IBMCloudServiceName = "ResourceController" + // IBMCloudServiceResourceManager is the name for IBM Cloud Resource Manager. + IBMCloudServiceResourceManager IBMCloudServiceName = "ResourceManager" + // IBMCloudServiceVPC is the name for IBM Cloud VPC. + IBMCloudServiceVPC IBMCloudServiceName = "VPC" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go new file mode 100644 index 0000000000000..75b647f745c37 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -0,0 +1,224 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIServer holds configuration (like serving certificates, client CA and CORS domains) +// shared by all API servers in the system, among them especially kube-apiserver +// and openshift-apiserver. The canonical name of an instance is 'cluster'. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=apiservers,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type APIServer struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +required + Spec APIServerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status APIServerStatus `json:"status"` +} + +type APIServerSpec struct { + // servingCert is the TLS cert info for serving secure traffic. 
If not specified, operator managed certificates + // will be used for serving secure traffic. + // +optional + ServingCerts APIServerServingCerts `json:"servingCerts"` + // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for + // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. + // You usually only have to set this if you have your own PKI you wish to honor client certificates from. + // The ConfigMap must exist in the openshift-config namespace and contain the following required fields: + // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. + // +optional + ClientCA ConfigMapNameReference `json:"clientCA"` + // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the + // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth + // server from JavaScript applications. + // The values are regular expressions that correspond to the Golang regular expression language. + // +optional + AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` + // encryption allows the configuration of encryption of resources at the datastore layer. + // +optional + Encryption APIServerEncryption `json:"encryption"` + // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. + // + // If unset, a default (which may change between releases) is chosen. Note that only Old, + // Intermediate and Custom profiles are currently supported, and the maximum available + // minTLSVersion is VersionTLS12. + // +optional + TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + // audit specifies the settings for audit configuration to be applied to all OpenShift-provided + // API servers in the cluster. + // +optional + // +kubebuilder:default={profile: Default} + Audit Audit `json:"audit"` +} + +// AuditProfileType defines the audit policy profile type. +// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None +type AuditProfileType string + +const ( + // "None" disables audit logs. + NoneAuditProfileType AuditProfileType = "None" + + // "Default" is the existing default audit configuration policy. + DefaultAuditProfileType AuditProfileType = "Default" + + // "WriteRequestBodies" is similar to Default but it logs request and response + // HTTP payloads for write requests (create, update, patch) + WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies" + + // "AllRequestBodies" is similar to WriteRequestBodies, but also logs request + // and response HTTP payloads for read requests (get, list). + AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies" +) + +type Audit struct { + // profile specifies the name of the desired top-level audit profile to be applied to all requests + // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, + // openshift-apiserver and oauth-apiserver), with the exception of those requests that match + // one or more of the customRules. + // + // The following profiles are provided: + // - Default: default policy which means MetaData level logging with the exception of events + // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody + // level). 
+ // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for + // write requests (create, update, patch). + // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response + // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. + // + // Warning: It is not recommended to disable audit logging by using the `None` profile unless you + // are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. + // If you disable audit logging and a support situation arises, you might need to enable audit logging + // and reproduce the issue in order to troubleshoot properly. + // + // If unset, the 'Default' profile is used as the default. + // + // +kubebuilder:default=Default + Profile AuditProfileType `json:"profile,omitempty"` + // customRules specify profiles per group. These profiles take precedence over the + // top-level profile field if they apply. They are evaluated from top to bottom, and + // the first one that matches applies. + // +listType=map + // +listMapKey=group + // +optional + CustomRules []AuditCustomRule `json:"customRules,omitempty"` +} + +// AuditCustomRule describes a custom rule for an audit profile that takes precedence over + // the top-level profile. +type AuditCustomRule struct { + // group is the name of a group a request user must be a member of in order for this profile to apply. + // + // +kubebuilder:validation:MinLength=1 + // +required + Group string `json:"group"` + // profile specifies the name of the desired audit policy configuration to be deployed to + // all OpenShift-provided API servers in the cluster. + // + // The following profiles are provided: + // - Default: the existing default policy. + // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for + // write requests (create, update, patch). + // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response + // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. + // + // If unset, the 'Default' profile is used as the default. + // + // +required + Profile AuditProfileType `json:"profile,omitempty"` +} + +type APIServerServingCerts struct { + // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. + // If no named certificates are provided, or no named certificates match the server name as understood by a client, + // the defaultServingCertificate will be used. + // +optional + NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"` +} + +// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. +type APIServerNamedServingCert struct { + // names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to + // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. + // Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names. + // +optional + Names []string `json:"names,omitempty"` + // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
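+// A hedged sketch of the audit profile plus customRules combination
+// described above (the group name is illustrative):
+//
+//	audit := Audit{
+//		Profile: DefaultAuditProfileType,
+//		CustomRules: []AuditCustomRule{
+//			{Group: "system:authenticated:oauth", Profile: WriteRequestBodiesAuditProfileType},
+//		},
+//	}
+//	_ = audit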
+ // The secret must exist in the openshift-config namespace and contain the following required fields: + // - Secret.Data["tls.key"] - TLS private key. + // - Secret.Data["tls.crt"] - TLS certificate. + ServingCertificate SecretNameReference `json:"servingCertificate"` +} + +type APIServerEncryption struct { + // type defines what encryption type should be used to encrypt resources at the datastore layer. + // When this field is unset (i.e. when it is set to the empty string), identity is implied. + // The behavior of unset can and will change over time. Even if encryption is enabled by default, + // the meaning of unset may change to a different encryption type based on changes in best practices. + // + // When encryption is enabled, all sensitive resources shipped with the platform are encrypted. + // This list of sensitive resources can and will change over time. The current authoritative list is: + // + // 1. secrets + // 2. configmaps + // 3. routes.route.openshift.io + // 4. oauthaccesstokens.oauth.openshift.io + // 5. oauthauthorizetokens.oauth.openshift.io + // + // +unionDiscriminator + // +optional + Type EncryptionType `json:"type,omitempty"` +} + +// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm +type EncryptionType string + +const ( + // identity refers to a type where no encryption is performed at the datastore layer. + // Resources are written as-is without encryption. + EncryptionTypeIdentity EncryptionType = "identity" + + // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESCBC EncryptionType = "aescbc" + + // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESGCM EncryptionType = "aesgcm" +) + +type APIServerStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type APIServerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
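+// A hedged sketch of opting into datastore-layer encryption using the
+// types defined above (illustrative only):
+//
+//	spec := APIServerSpec{
+//		Encryption: APIServerEncryption{Type: EncryptionTypeAESGCM},
+//	}
+//	_ = spec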
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []APIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go new file mode 100644 index 0000000000000..65dffddb00fd5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -0,0 +1,465 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" + +// Authentication specifies cluster-wide settings for authentication (like OAuth and +// webhook token authenticators). The canonical name of an instance is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=authentications,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Authentication struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec AuthenticationSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status AuthenticationStatus `json:"status"` +} + +type AuthenticationSpec struct { + // type identifies the cluster managed, user facing authentication mode in use. + // Specifically, it manages the component that responds to login attempts. + // The default is IntegratedOAuth. + // +optional + Type AuthenticationType `json:"type"` + + // oauthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for an external OAuth server. + // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // If oauthMetadata.name is non-empty, this value has precedence + // over any metadata reference stored in status. 
+ // The key "oauthMetadata" is used to locate the data. + // If specified and the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config. + // +optional + OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` + + // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + // +listType=atomic + WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` + + // webhookTokenAuthenticator configures a remote token reviewer. + // These remote authentication webhooks can be used to verify bearer tokens + // via the tokenreviews.authentication.k8s.io REST API. This is required to + // honor bearer tokens that are provisioned by an external authentication service. + // + // Can only be set if "Type" is set to "None". + // + // +optional + WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` + + // serviceAccountIssuer is the identifier of the bound service account token + // issuer. + // The default is https://kubernetes.default.svc + // WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the + // previous issuer value. Instead, the tokens issued by previous service account issuer will continue to + // be trusted for a time period chosen by the platform (currently set to 24h). + // This time period is subject to change over time. + // This allows internal components to transition to use new service account issuer without service distruption. + // +optional + ServiceAccountIssuer string `json:"serviceAccountIssuer"` + + // oidcProviders are OIDC identity providers that can issue tokens + // for this cluster + // Can only be set if "Type" is set to "OIDC". + // + // At most one provider can be configured. + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=1 + // +openshift:enable:FeatureGate=ExternalOIDC + OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` +} + +type AuthenticationStatus struct { + // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for the in-cluster integrated OAuth server. + // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This contains the observed value based on cluster state. + // An explicitly set value in spec.oauthMetadata has precedence over this field. + // This field has no meaning if authentication spec.type is not set to IntegratedOAuth. + // The key "oauthMetadata" is used to locate the data. + // If the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config-managed. + IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` + + // oidcClients is where participating operators place the current OIDC client status + // for OIDC clients that can be customized by the cluster-admin. 
+ // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + // +openshift:enable:FeatureGate=ExternalOIDC + OIDCClients []OIDCClientStatus `json:"oidcClients"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} + +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC +type AuthenticationType string + +const ( + // None means that no cluster managed authentication system is in place. + // Note that user login will only work if a manually configured system is in place and + // referenced in authentication spec via oauthMetadata and + // webhookTokenAuthenticator/oidcProviders + AuthenticationTypeNone AuthenticationType = "None" + + // IntegratedOAuth refers to the cluster managed OAuth server. + // It is configured via the top level OAuth config. + AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" + + // AuthenticationTypeOIDC refers to a configuration with an external + // OIDC server configured directly with the kube-apiserver. + AuthenticationTypeOIDC AuthenticationType = "OIDC" +) + +// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. +// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. +type DeprecatedWebhookTokenAuthenticator struct { + // kubeConfig contains kube config file data which describes how to access the remote webhook service. + // For further details, see: + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // The namespace for this secret is determined by the point of use. + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +// webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator +type WebhookTokenAuthenticator struct { + // kubeConfig references a secret that contains kube config file data which + // describes how to access the remote webhook service. + // The namespace for the referenced secret is openshift-config. + // + // For further details, see: + // + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. 
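+// A hedged sketch of wiring a remote token reviewer with the types above
+// (the secret name is hypothetical; the secret must live in openshift-config):
+//
+//	spec := AuthenticationSpec{
+//		Type: AuthenticationTypeNone, // webhook may only be set with type "None"
+//		WebhookTokenAuthenticator: &WebhookTokenAuthenticator{
+//			KubeConfig: SecretNameReference{Name: "webhook-token-reviewer"},
+//		},
+//	}
+//	_ = spec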
+ // +required + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +const ( + // OAuthMetadataKey is the key for the oauth authorization server metadata + OAuthMetadataKey = "oauthMetadata" + + // KubeConfigKey is the key for the kube config file data in a secret + KubeConfigKey = "kubeConfig" +) + +type OIDCProvider struct { + // name of the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + // issuer describes attributes of the OIDC token issuer + // + // +required + Issuer TokenIssuer `json:"issuer"` + + // oidcClients contains configuration for the platform's clients that + // need to request tokens from the issuer + // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + OIDCClients []OIDCClientConfig `json:"oidcClients"` + + // claimMappings describes rules on how to transform information from an + // ID token into a cluster identity + ClaimMappings TokenClaimMappings `json:"claimMappings"` + + // claimValidationRules are rules that are applied to validate token claims to authenticate users. + // + // +listType=atomic + ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` +} + +// +kubebuilder:validation:MinLength=1 +type TokenAudience string + +type TokenIssuer struct { + // URL is the serving URL of the token issuer. + // Must use the https:// scheme. + // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +required + URL string `json:"issuerURL"` + + // audiences is an array of audiences that the token was issued for. + // Valid tokens must include at least one of these values in their + // "aud" claim. + // Must be set to exactly one value. + // + // +listType=set + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + // +required + Audiences []TokenAudience `json:"audiences"` + + // CertificateAuthority is a reference to a config map in the + // configuration namespace. The .data of the configMap must contain + // the "ca-bundle.crt" key. + // If unset, system trust is used instead. + CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` +} + +type TokenClaimMappings struct { + // username is the name of the claim that should be used to construct + // usernames for the cluster identity. + // + // Default value: "sub" + Username UsernameClaimMapping `json:"username,omitempty"` + + // groups is the name of the claim that should be used to construct + // groups for the cluster identity. + // The referenced claim must use an array of string values.
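+// A hedged sketch of a single OIDC provider entry using the issuer and
+// claim mapping types above (the URL, audience, and names are hypothetical):
+//
+//	provider := OIDCProvider{
+//		Name: "example-oidc",
+//		Issuer: TokenIssuer{
+//			URL:       "https://oidc.example.com",
+//			Audiences: []TokenAudience{"openshift"},
+//		},
+//		ClaimMappings: TokenClaimMappings{
+//			Username: UsernameClaimMapping{TokenClaimMapping: TokenClaimMapping{Claim: "sub"}},
+//		},
+//	}
+//	_ = provider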
+ Groups PrefixedClaimMapping `json:"groups,omitempty"` +} + +type TokenClaimMapping struct { + // claim is a JWT token claim to be used in the mapping + // + // +required + Claim string `json:"claim"` +} + +type OIDCClientConfig struct { + // componentName is the name of the component that is supposed to consume this + // client configuration + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + ComponentName string `json:"componentName"` + + // componentNamespace is the namespace of the component that is supposed to consume this + // client configuration + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + ComponentNamespace string `json:"componentNamespace"` + + // clientID is the identifier of the OIDC client from the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +required + ClientID string `json:"clientID"` + + // clientSecret refers to a secret in the `openshift-config` namespace that + // contains the client secret in the `clientSecret` key of the `.data` field + ClientSecret SecretNameReference `json:"clientSecret"` + + // extraScopes is an optional set of scopes to request tokens with. + // + // +listType=set + ExtraScopes []string `json:"extraScopes"` +} + +type OIDCClientStatus struct { + // componentName is the name of the component that will consume a client configuration. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + ComponentName string `json:"componentName"` + + // componentNamespace is the namespace of the component that will consume a client configuration. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + ComponentNamespace string `json:"componentNamespace"` + + // currentOIDCClients is a list of clients that the component is currently using. + // + // +listType=map + // +listMapKey=issuerURL + // +listMapKey=clientID + CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` + + // consumingUsers is a slice of ServiceAccounts that need to have read + // permission on the `clientSecret` secret. + // + // +kubebuilder:validation:MaxItems=5 + // +listType=set + ConsumingUsers []ConsumingUser `json:"consumingUsers"` + + // conditions are used to communicate the state of the `oidcClients` entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If Available is true, the component is successfully using the configured client. + // If Degraded is true, that means something has gone wrong trying to handle the client configuration. + // If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. + // + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type OIDCClientReference struct { + // OIDCName refers to the `name` of the provider from `oidcProviders` + // + // +kubebuilder:validation:MinLength=1 + // +required + OIDCProviderName string `json:"oidcProviderName"` + + // URL is the serving URL of the token issuer. + // Must use the https:// scheme. 
+ // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +required + IssuerURL string `json:"issuerURL"` + + // clientID is the identifier of the OIDC client from the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +required + ClientID string `json:"clientID"` +} + +// +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +type UsernameClaimMapping struct { + TokenClaimMapping `json:",inline"` + + // prefixPolicy specifies how a prefix should apply. + // + // By default, claims other than `email` will be prefixed with the issuer URL to + // prevent naming clashes with other plugins. + // + // Set to "NoPrefix" to disable prefixing. + // + // Example: + // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". + // If the JWT claim `username` contains value `userA`, the resulting + // mapped value will be "myoidc:userA". + // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the + // JWT `email` claim contains value "userA@myoidc.tld", the resulting + // mapped value will be "myoidc:userA@myoidc.tld". + // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + // and `claim` is set to: + // (a) "username": the mapped value will be "https://myoidc.tld#userA" + // (b) "email": the mapped value will be "userA@myoidc.tld" + // + // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} + PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` + + Prefix *UsernamePrefix `json:"prefix"` +} + +type UsernamePrefixPolicy string + +var ( + // NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix. + // If the username claim is anything else, it is prefixed by the issuerURL. + NoOpinion UsernamePrefixPolicy = "" + + // NoPrefix means the username claim value will not have any prefix + NoPrefix UsernamePrefixPolicy = "NoPrefix" + + // Prefix means the prefix value must be specified. It cannot be empty. + Prefix UsernamePrefixPolicy = "Prefix" +) + +type UsernamePrefix struct { + // +kubebuilder:validation:MinLength=1 + // +required + PrefixString string `json:"prefixString"` +} + +type PrefixedClaimMapping struct { + TokenClaimMapping `json:",inline"` + + // prefix is a string to prefix the value from the token in the result of the + // claim mapping. + // + // By default, no prefixing occurs. + // + // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains + // an array of strings "a", "b" and "c", the mapping will result in an + // array of strings "myoidc:a", "myoidc:b" and "myoidc:c". + Prefix string `json:"prefix"` +} + +type TokenValidationRuleType string + +const ( + TokenValidationRuleTypeRequiredClaim = "RequiredClaim" +) + +type TokenClaimValidationRule struct { + // type sets the type of the validation rule + // + // +kubebuilder:validation:Enum={"RequiredClaim"} + // +kubebuilder:default="RequiredClaim" + Type TokenValidationRuleType `json:"type"` + + // requiredClaim allows configuring a required claim name and its expected + // value + RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` +} + +type TokenRequiredClaim struct { + // claim is the name of a required claim. Only claims with string values are + // supported.
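+// A hedged sketch of a required-claim validation rule using the types
+// above (the claim name and value are hypothetical):
+//
+//	rule := TokenClaimValidationRule{
+//		Type: TokenValidationRuleTypeRequiredClaim,
+//		RequiredClaim: &TokenRequiredClaim{
+//			Claim:         "email_verified",
+//			RequiredValue: "true",
+//		},
+//	}
+//	_ = rule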
+ // + // +kubebuilder:validation:MinLength=1 + // +required + Claim string `json:"claim"` + + // requiredValue is the required value for the claim. + // + // +kubebuilder:validation:MinLength=1 + // +required + RequiredValue string `json:"requiredValue"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go new file mode 100644 index 0000000000000..dcde1fc5b811b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -0,0 +1,132 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build configures the behavior of OpenShift builds for the entire cluster. +// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. +// +// The canonical name is "cluster" +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=openshift-controller-manager,operatorOrdering=01 +// +openshift:capability=Build +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=builds,scope=Cluster +// +kubebuilder:subresource:status +type Build struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user-settable values for the build controller configuration + // +required + Spec BuildSpec `json:"spec"` +} + +type BuildSpec struct { + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted for image pushes and pulls during builds. + // The namespace for this config map is openshift-config. + // + // DEPRECATED: Additional CAs for image pull and push should be set on + // image.config.openshift.io/cluster instead. + // + // +optional + AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` + // buildDefaults controls the default information for Builds + // +optional + BuildDefaults BuildDefaults `json:"buildDefaults"` + // buildOverrides controls override settings for builds + // +optional + BuildOverrides BuildOverrides `json:"buildOverrides"` +} + +type BuildDefaults struct { + // defaultProxy contains the default proxy settings for all build operations, including image pull/push + // and source download. + // + // Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables + // in the build config's strategy. + // +optional + DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"` + + // gitProxy contains the proxy settings for git operations only. If set, this will override + // any Proxy settings for all git commands, such as git clone. + // + // Values that are not set here will be inherited from DefaultProxy.
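+// A hedged sketch of cluster-wide build defaults using the fields above
+// (the proxy URL and env var are hypothetical; ProxySpec is assumed to be
+// the proxy type defined elsewhere in this package):
+//
+//	defaults := BuildDefaults{
+//		DefaultProxy: &ProxySpec{HTTPProxy: "http://proxy.example.com:3128"},
+//		Env:          []corev1.EnvVar{{Name: "BUILD_LOGLEVEL", Value: "2"}},
+//	}
+//	_ = defaults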
+ // +optional + GitProxy *ProxySpec `json:"gitProxy,omitempty"` + + // env is a set of default environment variables that will be applied to the + // build if the specified variables do not exist on the build + // +optional + Env []corev1.EnvVar `json:"env,omitempty"` + + // imageLabels is a list of docker labels that are applied to the resulting image. + // Users can override a default label by providing a label with the same name in their + // Build/BuildConfig. + // +optional + ImageLabels []ImageLabel `json:"imageLabels,omitempty"` + + // resources defines resource requirements to execute the build. + // +optional + Resources corev1.ResourceRequirements `json:"resources"` +} + +type ImageLabel struct { + // name defines the name of the label. It must have non-zero length. + Name string `json:"name"` + + // value defines the literal value of the label. + // +optional + Value string `json:"value,omitempty"` +} + +type BuildOverrides struct { + // imageLabels is a list of docker labels that are applied to the resulting image. + // If a user provides a label in their Build/BuildConfig with the same name as one in this + // list, the user's label will be overwritten. + // +optional + ImageLabels []ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // tolerations is a list of Tolerations that will override any existing + // tolerations set on a build pod. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // forcePull overrides, if set, the equivalent value in the builds, + // i.e. false disables force pull for all builds, + // true enables force pull for all builds, + // independently of what each build specifies itself + // +optional + ForcePull *bool `json:"forcePull,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BuildList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Build `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go new file mode 100644 index 0000000000000..4a6823640d54f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -0,0 +1,218 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterOperator is the Custom Resource object which holds the current state +// of an operator. This object is used by operators to convey their state to +// the rest of the cluster. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusteroperators,scope=Cluster,shortName=co +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Version,JSONPath=.status.versions[?(@.name=="operator")].version,type=string,description=The version the operator is at. +// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string,description=Whether the operator is running and stable. +// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string,description=Whether the operator is processing changes. +// +kubebuilder:printcolumn:name=Degraded,JSONPath=.status.conditions[?(@.type=="Degraded")].status,type=string,description=Whether the operator is degraded. +// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Available")].lastTransitionTime,type=date,description=The time the operator's Available status last changed. +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true +type ClusterOperator struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec holds configuration that could apply to any operator. + // +required + Spec ClusterOperatorSpec `json:"spec"` + + // status holds the information about the state of an operator. It is consistent with status information across + // the Kubernetes ecosystem. + // +optional + Status ClusterOperatorStatus `json:"status"` +} + +// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause". +type ClusterOperatorSpec struct { +} + +// ClusterOperatorStatus provides information about the status of the operator. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatus struct { + // conditions describes the state of the operator's managed and monitored components. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple + // operand entries in the array. Available operators must report the version of the operator itself with the name "operator". + // An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + // +optional + Versions []OperandVersion `json:"versions,omitempty"` + + // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: + // 1. the detailed resource driving the operator + // 2. operator namespaces + // 3. operand namespaces + // +optional + RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` + + // extension contains any additional status information specific to the + // operator which owns this status object. 
+ // +nullable + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + Extension runtime.RawExtension `json:"extension"` +} + +type OperandVersion struct { + // name is the name of the particular operand this version is for. It usually matches container images, not operators. + // +required + Name string `json:"name"` + + // version indicates which version of a particular operand is currently being managed. It must always match the Available + // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out + // 1.1.0. + // +required + Version string `json:"version"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // group of the referent. + // +required + Group string `json:"group"` + // resource of the referent. + // +required + Resource string `json:"resource"` + // namespace of the referent. + // +optional + Namespace string `json:"namespace,omitempty"` + // name of the referent. + // +required + Name string `json:"name"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means Kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ClusterOperatorStatusCondition represents the state of the operator's +// managed and monitored components. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatusCondition struct { + // type specifies the aspect reported by this condition. + // +required + Type ClusterStatusConditionType `json:"type"` + + // status of the condition, one of True, False, Unknown. + // +required + Status ConditionStatus `json:"status"` + + // lastTransitionTime is the time of the last update to the current status property. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // reason is the CamelCase reason for the condition's current status. + // +optional + Reason string `json:"reason,omitempty"` + + // message provides additional information about the current condition. + // This is only to be consumed by humans. It may contain Line Feed + // characters (U+000A), which should be rendered as new lines. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterStatusConditionType is an aspect of operator state. +type ClusterStatusConditionType string + +const ( + // Available indicates that the component (operator and all configured operands) + // is functional and available in the cluster. Available=False means at least + // part of the component is non-functional, and that the condition requires + // immediate administrator intervention. + OperatorAvailable ClusterStatusConditionType = "Available" + + // Progressing indicates that the component (operator and all configured operands) + // is actively rolling out new code, propagating config changes, or otherwise + // moving from one steady state to another. Operators should not report + // progressing when they are reconciling (without action) a previously known + // state.
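Consumers of ClusterOperatorStatus typically need to look up a single condition by type before acting on it. A minimal sketch, using only the types declared in this file; the helper name is hypothetical.

func findStatusCondition(conditions []ClusterOperatorStatusCondition, t ClusterStatusConditionType) *ClusterOperatorStatusCondition {
	for i := range conditions {
		if conditions[i].Type == t {
			return &conditions[i]
		}
	}
	return nil // the operator has not reported this condition type
}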
If the observed cluster state has changed and the component is + // reacting to it (scaling up for instance), Progressing should become true + // since it is moving from one steady state to another. + OperatorProgressing ClusterStatusConditionType = "Progressing" + + // Degraded indicates that the component (operator and all configured operands) + // does not match its desired state over a period of time resulting in a lower + // quality of service. The period of time may vary by component, but a Degraded + // state represents persistent observation of a condition. As a result, a + // component should not oscillate in and out of Degraded state. A component may + // be Available even if it is degraded. For example, a component may desire 3 + // running pods, but 1 pod is crash-looping. The component is Available but + // Degraded because it may have a lower quality of service. A component may be + // Progressing but not Degraded because the transition from one state to + // another does not persist over a long enough period to report Degraded. A + // component should not report Degraded during the course of a normal upgrade. + // A component may report Degraded in response to a persistent infrastructure + // failure that requires eventual administrator intervention. For example, if + // a control plane host is unhealthy and must be replaced. A component should + // report Degraded if unexpected errors occur over a period, but the + // expectation is that all unexpected errors are handled as operators mature. + OperatorDegraded ClusterStatusConditionType = "Degraded" + + // Upgradeable indicates whether the component (operator and all configured + // operands) is safe to upgrade based on the current cluster state. When + // Upgradeable is False, the cluster-version operator will prevent the + // cluster from performing impacted updates unless forced. When set on + // ClusterVersion, the message will explain which updates (minor or patch) + // are impacted. When set on ClusterOperator, False will block minor + // OpenShift updates. The message field should contain a human readable + // description of what the administrator should do to allow the cluster or + // component to successfully update. The cluster-version operator will + // allow updates when this condition is not False, including when it is + // missing, True, or Unknown. + OperatorUpgradeable ClusterStatusConditionType = "Upgradeable" + + // EvaluationConditionsDetected is used to indicate the result of the detection + // logic that was added to a component to evaluate the introduction of an + // invasive change that could potentially result in highly visible alerts, + // breakages or upgrade failures. You can concatenate multiple Reasons using + // the "::" delimiter if you need to evaluate the introduction of multiple changes. + EvaluationConditionsDetected ClusterStatusConditionType = "EvaluationConditionsDetected" +) + +// ClusterOperatorList is a list of OperatorStatus resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type ClusterOperatorList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata.
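The Upgradeable contract above ("updates are allowed when the condition is not False") is easy to invert by accident, so here is a hedged sketch of the check, again using only in-file types:

func upgradeableBlocksUpdates(conditions []ClusterOperatorStatusCondition) bool {
	for _, c := range conditions {
		if c.Type == OperatorUpgradeable {
			// Only an explicit False blocks updates; True and Unknown do not.
			return c.Status == ConditionFalse
		}
	}
	return false // a missing Upgradeable condition also permits updates
}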
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ClusterOperator `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go new file mode 100644 index 0000000000000..8994ca97cdd4a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -0,0 +1,908 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterVersion is the configuration for the ClusterVersionOperator. This is where +// parameters related to automatic updates can be set. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/495 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusterversions,scope=Cluster +// +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'marketplace' in self.spec.capabilities.additionalEnabledCapabilities ? 'OperatorLifecycleManager' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'OperatorLifecycleManager' in self.status.capabilities.enabledCapabilities) : true",message="the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability" +// +kubebuilder:printcolumn:name=Version,JSONPath=.status.history[?(@.state=="Completed")].version,type=string +// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string +// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string +// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Progressing")].lastTransitionTime,type=date +// +kubebuilder:printcolumn:name=Status,JSONPath=.status.conditions[?(@.type=="Progressing")].message,type=string +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true +type ClusterVersion struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the desired state of the cluster version - the operator will work + // to ensure that the desired version is applied to the cluster. + // +required + Spec ClusterVersionSpec `json:"spec"` + // status contains information about the available updates and any in-progress + // updates. + // +optional + Status ClusterVersionStatus `json:"status"` +} + +// ClusterVersionSpec is the desired version state of the cluster. 
It includes +// the version the cluster should be at, how the cluster is identified, and +// where the cluster should look for version updates. +// +k8s:deepcopy-gen=true +type ClusterVersionSpec struct { + // clusterID uniquely identifies this cluster. This is expected to be + // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in + // hexadecimal values). This is a required field. + // +required + ClusterID ClusterID `json:"clusterID"` + + // desiredUpdate is an optional field that indicates the desired value of + // the cluster version. Setting this value will trigger an upgrade (if + // the current version does not match the desired version). The set of + // recommended update values is listed as part of available updates in + // status, and setting values outside that range may cause the upgrade + // to fail. + // + // Some of the fields are inter-related with restrictions and meanings described here. + // 1. image is specified, version is specified, architecture is specified. API validation error. + // 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. + // 3. image is specified, version is not specified, architecture is specified. API validation error. + // 4. image is specified, version is not specified, architecture is not specified. image is used. + // 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. + // 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. + // 7. image is not specified, version is not specified, architecture is specified. API validation error. + // 8. image is not specified, version is not specified, architecture is not specified. API validation error. + // + // If an upgrade fails, the operator will halt and report status + // about the failing component. Setting the desired update value back to + // the previous version will cause a rollback to be attempted. Not all + // rollbacks will succeed. + // + // +optional + DesiredUpdate *Update `json:"desiredUpdate,omitempty"` + + // upstream may be used to specify the preferred update server. By default + // it will use the appropriate update server for the cluster and region. + // + // +optional + Upstream URL `json:"upstream,omitempty"` + // channel is an identifier for explicitly requesting that a non-default + // set of updates be applied to this cluster. The default channel will + // contain stable updates that are appropriate for production clusters. + // + // +optional + Channel string `json:"channel,omitempty"` + + // capabilities configures the installation of optional, core + // cluster components. A null value here is identical to an + // empty object; see the child properties for default semantics. + // +optional + Capabilities *ClusterVersionCapabilitiesSpec `json:"capabilities,omitempty"` + + // signatureStores contains the upstream URIs to verify release signatures and an optional + // reference to a config map by name containing the PEM-encoded CA bundle. + // + // By default, CVO will use existing signature stores if this property is empty. + // The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature + // in these stores in parallel only when local ConfigMaps did not include a valid signature.
+ // Validation will fail if none of the signature stores reply with a valid signature before timeout. + // Setting signatureStores will replace the default signature stores with custom signature stores. + // Default stores can be used with custom signature stores by adding them manually. + // + // A maximum of 32 signature stores may be configured. + // +kubebuilder:validation:MaxItems=32 + // +openshift:enable:FeatureGate=SignatureStores + // +listType=map + // +listMapKey=url + // +optional + SignatureStores []SignatureStore `json:"signatureStores"` + + // overrides is a list of overrides for components that are managed by the + // cluster version operator. Marking a component unmanaged will prevent + // the operator from creating or updating the object. + // +listType=map + // +listMapKey=kind + // +listMapKey=group + // +listMapKey=namespace + // +listMapKey=name + // +optional + Overrides []ComponentOverride `json:"overrides,omitempty"` +} + +// ClusterVersionStatus reports the status of the cluster versioning, +// including any upgrades that are in progress. The current field will +// be set to whichever version the cluster is reconciling to, and the +// conditions array will report whether the update succeeded, is in +// progress, or is failing. +// +k8s:deepcopy-gen=true +type ClusterVersionStatus struct { + // desired is the version that the cluster is reconciling towards. + // If the cluster is not yet fully initialized desired will be set + // with the information available, which may be an image or a tag. + // +required + Desired Release `json:"desired"` + + // history contains a list of the most recent versions applied to the cluster. + // This value may be empty during cluster startup, and then will be updated + // when a new update is being applied. The newest update is first in the + // list and it is ordered by recency. Updates in the history have state + // Completed if the rollout completed - if an update was failing or halfway + // applied the state will be Partial. Only a limited amount of update history + // is preserved. + // +listType=atomic + // +optional + History []UpdateHistory `json:"history,omitempty"` + + // observedGeneration reports which version of the spec is being synced. + // If this value is not equal to metadata.generation, then the desired + // and conditions fields may represent a previous version. + // +required + ObservedGeneration int64 `json:"observedGeneration"` + + // versionHash is a fingerprint of the content that the cluster will be + // updated with. It is used by the operator to avoid unnecessary work + // and is for internal use only. + // +required + VersionHash string `json:"versionHash"` + + // capabilities describes the state of optional, core cluster components. + Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"` + + // conditions provides information about the cluster version. The condition + // "Available" is set to true if the desiredUpdate has been reached. The + // condition "Progressing" is set to true if an update is being applied. + // The condition "Degraded" is set to true if an update is currently blocked + // by a temporary or permanent error. Conditions are only valid for the + // current desiredUpdate when metadata.generation is equal to + // status.generation.
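To make the overrides field concrete: marking a single workload unmanaged looks like the following sketch. The ComponentOverride type is defined later in this file, and the deployment name and namespace here are placeholders, not a recommendation.

func exampleUnmanagedOverride() ComponentOverride {
	return ComponentOverride{
		Kind:      "Deployment",
		Group:     "apps",
		Namespace: "openshift-ingress", // placeholder namespace
		Name:      "router-default",    // placeholder name
		Unmanaged: true,                // the CVO stops creating or updating this object
	}
}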
+ // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // availableUpdates contains updates recommended for this + // cluster. Updates which appear in conditionalUpdates but not in + // availableUpdates may expose this cluster to known issues. This list + // may be empty if no updates are recommended, if the update service + // is unavailable, or if an invalid channel has been specified. + // +nullable + // +listType=atomic + // +required + AvailableUpdates []Release `json:"availableUpdates"` + + // conditionalUpdates contains the list of updates that may be + // recommended for this cluster if it meets specific required + // conditions. Consumers interested in the set of updates that are + // actually recommended for this cluster should use + // availableUpdates. This list may be empty if no updates are + // recommended, if the update service is unavailable, or if an empty + // or invalid channel has been specified. + // +listType=atomic + // +optional + ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` +} + +// UpdateState is a constant representing whether an update was successfully +// applied to the cluster or not. +type UpdateState string + +const ( + // CompletedUpdate indicates an update was successfully applied + // to the cluster (all resource updates were successful). + CompletedUpdate UpdateState = "Completed" + // PartialUpdate indicates an update was never completely applied + // or is currently being applied. + PartialUpdate UpdateState = "Partial" +) + +// UpdateHistory is a single attempted update to the cluster. +type UpdateHistory struct { + // state reflects whether the update was fully applied. The Partial state + // indicates the update is not fully applied, while the Completed state + // indicates the update was successfully rolled out at least once (all + // parts of the update successfully applied). + // +required + State UpdateState `json:"state"` + + // startedTime is the time at which the update was started. + // +required + StartedTime metav1.Time `json:"startedTime"` + + // completionTime, if set, is when the update was fully applied. The update + // that is currently being applied will have a null completion time. + // Completion time will always be set for entries that are not the current + // update (usually to the started time of the next update). + // +required + // +nullable + CompletionTime *metav1.Time `json:"completionTime"` + + // version is a semantic version identifying the update version. If the + // requested image does not define a version, or if a failure occurs + // retrieving the image, this value may be empty. + // + // +optional + Version string `json:"version"` + + // image is a container image location that contains the update. This value + // is always populated. + // +required + Image string `json:"image"` + + // verified indicates whether the provided update was properly verified + // before it was installed. If this is false the cluster may not be trusted. + // Verified does not cover upgradeable checks that depend on the cluster + // state at the time when the update target was accepted. + // +required + Verified bool `json:"verified"` + + // acceptedRisks records risks which were accepted to initiate the update. 
+ // For example, it may mention an Upgradeable=False or missing signature + // that was overridden via desiredUpdate.force, or an update that was + // initiated despite not being in the availableUpdates set of recommended + // update targets. + // +optional + AcceptedRisks string `json:"acceptedRisks,omitempty"` +} + +// ClusterID is a string RFC4122 UUID. +type ClusterID string + +// ClusterVersionArchitecture enumerates valid cluster architectures. +// +kubebuilder:validation:Enum="Multi";"" +type ClusterVersionArchitecture string + +const ( + // ClusterVersionArchitectureMulti identifies a multi architecture. A multi + // architecture cluster is capable of running nodes with multiple architectures. + ClusterVersionArchitectureMulti ClusterVersionArchitecture = "Multi" +) + +// ClusterVersionCapability enumerates optional, core cluster components. +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager;OperatorLifecycleManagerV1 +type ClusterVersionCapability string + +const ( + // ClusterVersionCapabilityOpenShiftSamples manages the sample + // image streams and templates stored in the openshift + // namespace, and any registry credentials, stored as a secret, + // needed for the image streams to import the images they + // reference. + ClusterVersionCapabilityOpenShiftSamples ClusterVersionCapability = "openshift-samples" + + // ClusterVersionCapabilityBaremetal manages the cluster + // baremetal operator which is responsible for running the metal3 + // deployment. + ClusterVersionCapabilityBaremetal ClusterVersionCapability = "baremetal" + + // ClusterVersionCapabilityMarketplace manages the Marketplace operator which + // supplies Operator Lifecycle Manager (OLM) users with default catalogs of + // "optional" operators. + // + // Note that Marketplace has a hard requirement on OLM. OLM can not be disabled + // while Marketplace is enabled. + ClusterVersionCapabilityMarketplace ClusterVersionCapability = "marketplace" + + // ClusterVersionCapabilityConsole manages the Console operator which + // installs and maintains the web console. + ClusterVersionCapabilityConsole ClusterVersionCapability = "Console" + + // ClusterVersionCapabilityInsights manages the Insights operator which + // collects anonymized information about the cluster to generate + // recommendations for possible cluster issues. + ClusterVersionCapabilityInsights ClusterVersionCapability = "Insights" + + // ClusterVersionCapabilityStorage manages the storage operator which + // is responsible for providing cluster-wide storage defaults. + // WARNING: Do not disable this capability when deployed to + // RHEV and OpenStack without reading the docs. + // These clusters heavily rely on this capability, and disabling + // it may cause damage to the cluster.
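Because history is ordered newest-first and entries carry a Completed/Partial state, the most recent fully applied version can be read with a short scan. Illustrative only; the helper name is hypothetical.

func lastCompletedVersion(history []UpdateHistory) string {
	// The newest update is first, so the first Completed entry is the
	// most recent update that fully rolled out.
	for _, h := range history {
		if h.State == CompletedUpdate {
			return h.Version
		}
	}
	return "" // no update has completed yet
}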
+ ClusterVersionCapabilityStorage ClusterVersionCapability = "Storage" + + // ClusterVersionCapabilityCSISnapshot manages the csi snapshot + // controller operator which is responsible for watching the + // VolumeSnapshot CRD objects and manages the creation and deletion + // lifecycle of volume snapshots + ClusterVersionCapabilityCSISnapshot ClusterVersionCapability = "CSISnapshot" + + // ClusterVersionCapabilityNodeTuning manages the Node Tuning Operator + // which is responsible for watching the Tuned and Profile CRD + // objects and manages the containerized TuneD daemon which controls + // system level tuning of Nodes + ClusterVersionCapabilityNodeTuning ClusterVersionCapability = "NodeTuning" + + // ClusterVersionCapabilityMachineAPI manages + // machine-api-operator + // cluster-autoscaler-operator + // cluster-control-plane-machine-set-operator + // which are responsible for machine configuration; disabling + // this capability is mainly targeted at SNO clusters. + // + // The following CRDs are disabled as well + // machines + // machineset + // controlplanemachineset + // + // WARNING: Do not disable this capability without reading the + // documentation. It is an important part of the OpenShift system + // and disabling it may cause cluster damage. + ClusterVersionCapabilityMachineAPI ClusterVersionCapability = "MachineAPI" + + // ClusterVersionCapabilityBuild manages the Build API which is responsible + // for watching the Build API objects and managing their lifecycle. + // The functionality is located under openshift-apiserver and openshift-controller-manager. + // + // The following resources are taken into account: + // - builds + // - buildconfigs + ClusterVersionCapabilityBuild ClusterVersionCapability = "Build" + + // ClusterVersionCapabilityDeploymentConfig manages the DeploymentConfig API + // which is responsible for watching DeploymentConfig objects and managing their lifecycle. + // The functionality is located under openshift-apiserver and openshift-controller-manager. + // + // The following resources are taken into account: + // - deploymentconfigs + ClusterVersionCapabilityDeploymentConfig ClusterVersionCapability = "DeploymentConfig" + + // ClusterVersionCapabilityImageRegistry manages the image registry which + // allows distributing Docker images + ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry" + + // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager (legacy) + // which itself manages the lifecycle of operators + ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager" + + // ClusterVersionCapabilityOperatorLifecycleManagerV1 manages the Operator Lifecycle Manager (v1) + // which itself manages the lifecycle of operators + ClusterVersionCapabilityOperatorLifecycleManagerV1 ClusterVersionCapability = "OperatorLifecycleManagerV1" + + // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers + // in the OpenShift cluster + ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential" + + // ClusterVersionCapabilityIngress manages the cluster ingress operator + // which is responsible for running the ingress controllers (including OpenShift router). + // + // The following CRDs are part of the capability as well: + // IngressController + // DNSRecord + // GatewayClass + // Gateway + // HTTPRoute + // ReferenceGrant + // + // WARNING: This capability cannot be disabled on the standalone OpenShift.
+ ClusterVersionCapabilityIngress ClusterVersionCapability = "Ingress" + + // ClusterVersionCapabilityCloudControllerManager manages various Cloud Controller + // Managers deployed on top of OpenShift. They help you work with the cloud + // provider API and embed cloud-specific control logic. + ClusterVersionCapabilityCloudControllerManager ClusterVersionCapability = "CloudControllerManager" +) + +// KnownClusterVersionCapabilities includes all known optional, core cluster components. +var KnownClusterVersionCapabilities = []ClusterVersionCapability{ + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, +} + +// ClusterVersionCapabilitySet defines sets of cluster version capabilities. +// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;v4.16;v4.17;v4.18;vCurrent +type ClusterVersionCapabilitySet string + +const ( + // ClusterVersionCapabilitySetNone is an empty set enabling + // no optional capabilities. + ClusterVersionCapabilitySetNone ClusterVersionCapabilitySet = "None" + + // ClusterVersionCapabilitySet4_11 is the recommended set of + // optional capabilities to enable for the 4.11 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_11 ClusterVersionCapabilitySet = "v4.11" + + // ClusterVersionCapabilitySet4_12 is the recommended set of + // optional capabilities to enable for the 4.12 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_12 ClusterVersionCapabilitySet = "v4.12" + + // ClusterVersionCapabilitySet4_13 is the recommended set of + // optional capabilities to enable for the 4.13 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_13 ClusterVersionCapabilitySet = "v4.13" + + // ClusterVersionCapabilitySet4_14 is the recommended set of + // optional capabilities to enable for the 4.14 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14" + + // ClusterVersionCapabilitySet4_15 is the recommended set of + // optional capabilities to enable for the 4.15 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_15 ClusterVersionCapabilitySet = "v4.15" + + // ClusterVersionCapabilitySet4_16 is the recommended set of + // optional capabilities to enable for the 4.16 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_16 ClusterVersionCapabilitySet = "v4.16" + + // ClusterVersionCapabilitySet4_17 is the recommended set of + // optional capabilities to enable for the 4.17 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_17 ClusterVersionCapabilitySet = "v4.17" + + // ClusterVersionCapabilitySet4_18 is the recommended set of + // optional capabilities to enable for the 4.18 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_18 ClusterVersionCapabilitySet = "v4.18" + + // ClusterVersionCapabilitySetCurrent is the recommended set + // of optional capabilities to enable for the cluster's + // current version of OpenShift. + ClusterVersionCapabilitySetCurrent ClusterVersionCapabilitySet = "vCurrent" +) + +// ClusterVersionCapabilitySets defines sets of cluster version capabilities. +var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVersionCapability{ + ClusterVersionCapabilitySetNone: {}, + ClusterVersionCapabilitySet4_11: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityMachineAPI, + }, + ClusterVersionCapabilitySet4_12: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityMachineAPI, + }, + ClusterVersionCapabilitySet4_13: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + }, + ClusterVersionCapabilitySet4_14: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + }, + ClusterVersionCapabilitySet4_15: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + }, + ClusterVersionCapabilitySet4_16: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + 
ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, + }, + ClusterVersionCapabilitySet4_17: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, + }, + ClusterVersionCapabilitySet4_18: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, + }, + ClusterVersionCapabilitySetCurrent: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, + ClusterVersionCapabilityCloudCredential, + ClusterVersionCapabilityIngress, + ClusterVersionCapabilityCloudControllerManager, + }, +} + +// ClusterVersionCapabilitiesSpec selects the managed set of +// optional, core cluster components. +// +k8s:deepcopy-gen=true +type ClusterVersionCapabilitiesSpec struct { + // baselineCapabilitySet selects an initial set of + // optional capabilities to enable, which can be extended via + // additionalEnabledCapabilities. If unset, the cluster will + // choose a default, and the default may change over time. + // The current default is vCurrent. + // +optional + BaselineCapabilitySet ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"` + + // additionalEnabledCapabilities extends the set of managed + // capabilities beyond the baseline defined in + // baselineCapabilitySet. The default is an empty set. + // +listType=atomic + // +optional + AdditionalEnabledCapabilities []ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"` +} + +// ClusterVersionCapabilitiesStatus describes the state of optional, +// core cluster components. 
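A hedged sketch of how baselineCapabilitySet and additionalEnabledCapabilities combine, using the ClusterVersionCapabilitySets map above and the documented vCurrent default. The real cluster-version operator also keeps capabilities that were implicitly enabled by earlier versions (capabilities cannot be disabled once enabled); this simplification ignores that.

func resolveEnabledCapabilities(spec *ClusterVersionCapabilitiesSpec) []ClusterVersionCapability {
	baseline := ClusterVersionCapabilitySetCurrent // documented current default; may change over time
	var extra []ClusterVersionCapability
	if spec != nil {
		if spec.BaselineCapabilitySet != "" {
			baseline = spec.BaselineCapabilitySet
		}
		extra = spec.AdditionalEnabledCapabilities
	}
	// Union of the baseline set and the additional capabilities, de-duplicated.
	all := append(append([]ClusterVersionCapability{}, ClusterVersionCapabilitySets[baseline]...), extra...)
	seen := map[ClusterVersionCapability]bool{}
	enabled := []ClusterVersionCapability{}
	for _, c := range all {
		if !seen[c] {
			seen[c] = true
			enabled = append(enabled, c)
		}
	}
	return enabled
}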
+// +k8s:deepcopy-gen=true +type ClusterVersionCapabilitiesStatus struct { + // enabledCapabilities lists all the capabilities that are currently managed. + // +listType=atomic + // +optional + EnabledCapabilities []ClusterVersionCapability `json:"enabledCapabilities,omitempty"` + + // knownCapabilities lists all the capabilities known to the current cluster. + // +listType=atomic + // +optional + KnownCapabilities []ClusterVersionCapability `json:"knownCapabilities,omitempty"` +} + +// ComponentOverride allows overriding cluster version operator's behavior +// for a component. +// +k8s:deepcopy-gen=true +type ComponentOverride struct { + // kind identifies which object to override. + // +required + Kind string `json:"kind"` + // group identifies the API group that the kind is in. + // +required + Group string `json:"group"` + + // namespace is the component's namespace. If the resource is cluster + // scoped, the namespace should be empty. + // +required + Namespace string `json:"namespace"` + // name is the component's name. + // +required + Name string `json:"name"` + + // unmanaged controls if the cluster version operator should stop managing the + // resources in this cluster. + // Default: false + // +required + Unmanaged bool `json:"unmanaged"` +} + +// URL is a thin wrapper around string that ensures the string is a valid URL. +type URL string + +// Update represents an administrator update request. +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == \"\" || self.image == \"\") : true",message="cannot set both Architecture and Image" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != \"\" ? self.version != \"\" : true",message="Version must be set if Architecture is set" +// +k8s:deepcopy-gen=true +type Update struct { + // architecture is an optional field that indicates the desired + // value of the cluster architecture. In this context cluster + // architecture means either a single architecture or a multi + // architecture. architecture can only be set to Multi thereby + // only allowing updates from single to multi architecture. If + // architecture is set, image cannot be set and version must be + // set. + // Valid values are 'Multi' and empty. + // + // +optional + Architecture ClusterVersionArchitecture `json:"architecture"` + + // version is a semantic version identifying the update version. + // version is ignored if image is specified and required if + // architecture is specified. + // + // +optional + Version string `json:"version"` + + // image is a container image location that contains the update. + // image should be used when the desired version does not exist in availableUpdates or history. + // When image is set, version is ignored. When image is set, version should be empty. + // When image is set, architecture cannot be specified. + // + // +optional + Image string `json:"image"` + + // force allows an administrator to update to an image that has failed + // verification or upgradeable checks. This option should only + // be used when the authenticity of the provided image has been verified out + // of band because the provided image will run with full administrative access + // to the cluster. Do not use this flag with images that come from unknown + // or potentially malicious sources. + // + // +optional + Force bool `json:"force"` +} + +// Release represents an OpenShift release image and associated metadata.
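The two XValidation rules on Update, together with the image/version/architecture matrix documented on ClusterVersionSpec.desiredUpdate, reduce to a few Go checks. A sketch under the assumption that the empty string means "unset"; the helper name is hypothetical.

func updateProblems(u Update) []string {
	var problems []string
	if u.Architecture != "" && u.Image != "" {
		problems = append(problems, "cannot set both Architecture and Image")
	}
	if u.Architecture != "" && u.Version == "" {
		problems = append(problems, "Version must be set if Architecture is set")
	}
	if u.Architecture == "" && u.Version == "" && u.Image == "" {
		problems = append(problems, "one of Version or Image must be set")
	}
	return problems
}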
+// +k8s:deepcopy-gen=true +type Release struct { + // architecture is an optional field that indicates the + // value of the cluster architecture. In this context cluster + // architecture means either a single architecture or a multi + // architecture. + // Valid values are 'Multi' and empty. + // + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + Architecture ClusterVersionArchitecture `json:"architecture,omitempty"` + + // version is a semantic version identifying the update version. When this + // field is part of spec, version is optional if image is specified. + // +required + Version string `json:"version"` + + // image is a container image location that contains the update. When this + // field is part of spec, image is optional if version is specified and the + // availableUpdates field contains a matching version. + // +required + Image string `json:"image"` + + // url contains information about this release. This URL is set by + // the 'url' metadata property on a release or the metadata returned by + // the update API and should be displayed as a link in user + // interfaces. The URL field may not be set for test or nightly + // releases. + // +optional + URL URL `json:"url,omitempty"` + + // channels is the set of Cincinnati channels to which the release + // currently belongs. + // +listType=set + // +optional + Channels []string `json:"channels,omitempty"` +} + +// RetrievedUpdates reports whether available updates have been retrieved from +// the upstream update server. The condition is Unknown before retrieval, False +// if the updates could not be retrieved or recently failed, or True if the +// availableUpdates field is accurate and recent. +const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" + +// ConditionalUpdate represents an update which is recommended to some +// clusters on the version the current cluster is reconciling, but which +// may not be recommended for the current cluster. +type ConditionalUpdate struct { + // release is the target of the update. + // +required + Release Release `json:"release"` + + // risks represents the range of issues associated with + // updating to the target release. The cluster-version + // operator will evaluate all entries, and only recommend the + // update if there is at least one entry and all entries + // recommend the update. + // +kubebuilder:validation:MinItems=1 + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +required + Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"` + + // conditions represents the observations of the conditional update's + // current status. Known types are: + // * Recommended, for whether the update is recommended for the current cluster. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// ConditionalUpdateRisk represents a reason and cluster-state +// for not recommending a conditional update. +// +k8s:deepcopy-gen=true +type ConditionalUpdateRisk struct { + // url contains information about this risk. + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name is the CamelCase reason for not recommending a + // conditional update, in the event that matchingRules match the + // cluster state. 
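Consumers deciding whether to offer a conditional update generally key off the Recommended condition named above. A minimal sketch; the condition type string is taken from the field documentation, and an absent condition is conservatively treated as not recommended.

func isRecommended(cu ConditionalUpdate) bool {
	for _, c := range cu.Conditions {
		if c.Type == "Recommended" {
			return c.Status == metav1.ConditionTrue
		}
	}
	return false // not yet evaluated; do not recommend
}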
+ // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // message provides additional information about the risk of + // updating, in the event that matchingRules match the cluster + // state. This is only to be consumed by humans. It may + // contain Line Feed characters (U+000A), which should be + // rendered as new lines. + // +kubebuilder:validation:MinLength=1 + // +required + Message string `json:"message"` + + // matchingRules is a slice of conditions for deciding which + // clusters match the risk and which do not. The slice is + // ordered by decreasing precedence. The cluster-version + // operator will walk the slice in order, and stop after the + // first it can successfully evaluate. If no condition can be + // successfully evaluated, the update will not be recommended. + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +required + MatchingRules []ClusterCondition `json:"matchingRules"` +} + +// ClusterCondition is a union of typed cluster conditions. The 'type' +// property determines which of the type-specific properties are relevant. +// When evaluated on a cluster, the condition may match, not match, or +// fail to evaluate. +// +k8s:deepcopy-gen=true +type ClusterCondition struct { + // type represents the cluster-condition type. This defines + // the members and semantics of any additional properties. + // +kubebuilder:validation:Enum={"Always","PromQL"} + // +required + Type string `json:"type"` + + // promql represents a cluster condition based on PromQL. + // +optional + PromQL *PromQLClusterCondition `json:"promql,omitempty"` +} + +// PromQLClusterCondition represents a cluster condition based on PromQL. +type PromQLClusterCondition struct { + // promql is a PromQL query classifying clusters. This query + // should return a 1 in the match case and a 0 in the + // does-not-match case. Queries which return no time + // series, or which return values besides 0 or 1, are + // evaluation failures. + // +required + PromQL string `json:"promql"` +} + +// ClusterVersionList is a list of ClusterVersion resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type ClusterVersionList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ClusterVersion `json:"items"` +} + +// SignatureStore represents the URL of a custom signature store. +type SignatureStore struct { + + // url contains the upstream custom signature store URL. + // url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" + // +required + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the signature store is not honored.
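The matchingRules walk described above (ordered by precedence, stop at the first rule that evaluates, not recommended if nothing evaluates) might look like the sketch below. PromQL execution is abstracted behind a caller-supplied function because it requires a monitoring stack; the names are hypothetical.

func riskApplies(rules []ClusterCondition, runPromQL func(query string) (int, error)) (applies, evaluated bool) {
	for _, r := range rules {
		switch r.Type {
		case "Always":
			return true, true
		case "PromQL":
			if r.PromQL == nil {
				continue // malformed entry; fall through to the next rule
			}
			if v, err := runPromQL(r.PromQL.PromQL); err == nil {
				return v == 1, true // 1 means the cluster matches the risk
			}
			// evaluation failure: try the next, lower-precedence rule
		}
	}
	return false, false // nothing evaluated; per the docs, do not recommend the update
}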
+ // If the specified ca data is not valid, the signature store is not honored. + // If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go new file mode 100644 index 0000000000000..0ccc4a8f85e4e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -0,0 +1,80 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Console holds cluster-wide configuration for the web console, including the +// logout URL, and reports the public URL of the console. The canonical name is +// `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=consoles,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Console struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ConsoleSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ConsoleStatus `json:"status"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + // +optional + Authentication ConsoleAuthentication `json:"authentication"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + // The URL for the console. This will be derived from the host for the route that + // is created for the console. + ConsoleURL string `json:"consoleURL"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} + +// ConsoleAuthentication defines a list of optional configuration for console authentication. +type ConsoleAuthentication struct { + // An optional, absolute URL to redirect web browsers to after logging out of + // the console. If not specified, it will redirect to the default login page. 
+ // This is required when using an identity provider that supports single + // sign-on (SSO) such as: + // - OpenID (Keycloak, Azure) + // - RequestHeader (GSSAPI, SSPI, SAML) + // - OAuth (GitHub, GitLab, Google) + // Logging out of the console will destroy the user's token. The logoutRedirect + // provides the user the option to perform single logout (SLO) through the identity + // provider to destroy their single sign-on session. + // +optional + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$` + LogoutRedirect string `json:"logoutRedirect,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go new file mode 100644 index 0000000000000..06eb75ccf7092 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -0,0 +1,140 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNS holds cluster-wide information about DNS. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnses,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type DNS struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec DNSSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status DNSStatus `json:"status"` +} + +type DNSSpec struct { + // baseDomain is the base domain of the cluster. All managed DNS records will + // be sub-domains of this base. + // + // For example, given the base domain `openshift.example.com`, an API server + // DNS record may be created for `cluster-api.openshift.example.com`. + // + // Once set, this field cannot be changed. + BaseDomain string `json:"baseDomain"` + // publicZone is the location where all the DNS records that are publicly accessible to + // the internet exist. + // + // If this field is nil, no public records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PublicZone *DNSZone `json:"publicZone,omitempty"` + // privateZone is the location where all the DNS records that are only available internally + // to the cluster exist. + // + // If this field is nil, no private records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PrivateZone *DNSZone `json:"privateZone,omitempty"` + // platform holds configuration specific to the underlying + // infrastructure provider for DNS. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. 
+ // +optional + Platform DNSPlatformSpec `json:"platform,omitempty"` +} + +// DNSZone is used to define a DNS hosted zone. +// A zone can be identified by an ID or tags. +type DNSZone struct { + // id is the identifier that can be used to find the DNS hosted zone. + // + // on AWS, a zone can be fetched using `ID` as the id in [1], + // on Azure, a zone can be fetched using `ID` as a pre-determined name in [2], + // on GCP, a zone can be fetched using `ID` as a pre-determined name in [3]. + // + // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + // +optional + ID string `json:"id,omitempty"` + + // tags can be used to query the DNS hosted zone. + // + // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters. + // + // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + // +optional + Tags map[string]string `json:"tags,omitempty"` +} + +type DNSStatus struct { + // dnsSuffix (service-ca amongst others) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DNSList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []DNS `json:"items"` +} + +// DNSPlatformSpec holds cloud-provider-specific configuration +// for DNS administration. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise" +type DNSPlatformSpec struct { + // type is the underlying infrastructure provider for the cluster. + // Allowed values: "", "AWS". + // + // Individual components may not support all platforms, + // and must handle unrecognized platforms with best-effort defaults. + // + // +unionDiscriminator + // +required + // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" + Type PlatformType `json:"type"` + + // aws contains DNS configuration specific to the Amazon Web Services cloud provider. + // +optional + AWS *AWSDNSSpec `json:"aws"` +} + +// AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider. +type AWSDNSSpec struct { + // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + // operations on the cluster's private hosted zone specified in the cluster DNS config. + // When left empty, no role should be assumed.
+ // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +optional + PrivateZoneIAMRole string `json:"privateZoneIAMRole"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go new file mode 100644 index 0000000000000..81bc14f2c747a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -0,0 +1,151 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=featuregates,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type FeatureGate struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + // +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed" + Spec FeatureGateSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status FeatureGateStatus `json:"status"` +} + +type FeatureSet string + +var ( + // Default feature set that allows upgrades. + Default FeatureSet = "" + + // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning + // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. + TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade" + + // DevPreviewNoUpgrade turns on dev preview features that are not part of the normal supported platform. Turning + // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. + DevPreviewNoUpgrade FeatureSet = "DevPreviewNoUpgrade" + + // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. + // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations, + // your cluster may fail in an unrecoverable way. + CustomNoUpgrade FeatureSet = "CustomNoUpgrade" + + // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't, for instance. LatencySensitive is dead. + AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade} +) + +type FeatureGateSpec struct { + FeatureGateSelection `json:",inline"` +} + +// +union +type FeatureGateSelection struct { + // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. + // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
+ // +unionDiscriminator + // +optional + // +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;"" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed" + FeatureSet FeatureSet `json:"featureSet,omitempty"` + + // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. + // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations, + // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field. + // +optional + // +nullable + CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"` +} + +type CustomFeatureGates struct { + // enabled is a list of all feature gates that you want to force on + // +optional + Enabled []FeatureGateName `json:"enabled,omitempty"` + // disabled is a list of all feature gates that you want to force off + // +optional + Disabled []FeatureGateName `json:"disabled,omitempty"` +} + +// FeatureGateName is a string to enforce patterns on the name of a FeatureGate +// +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` +type FeatureGateName string + +type FeatureGateStatus struct { + // conditions represent the observations of the current state. + // Known .status.conditions.type are: "DeterminationDegraded" + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. + // Operators other than the CVO and cluster-config-operator must read the .status.featureGates, locate + // the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. + // The enabled/disabled values for a particular version may change during the life of the cluster as various + // .spec.featureSet values are selected. + // Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable + // lists is beyond the scope of this API and is the responsibility of individual operators. + // Only featureGates with .version in the ClusterVersion.status will be present in this list. + // +listType=map + // +listMapKey=version + FeatureGates []FeatureGateDetails `json:"featureGates"` +} + +type FeatureGateDetails struct { + // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. + // +required + Version string `json:"version"` + // enabled is a list of all feature gates that are enabled in the cluster for the named version. + // +optional + Enabled []FeatureGateAttributes `json:"enabled"` + // disabled is a list of all feature gates that are disabled in the cluster for the named version. + // +optional + Disabled []FeatureGateAttributes `json:"disabled"` +} + +type FeatureGateAttributes struct { + // name is the name of the FeatureGate.
+ // +required + Name FeatureGateName `json:"name"` + + // possible (probable?) future additions include + // 1. support level (Stable, ServiceDeliveryOnly, TechPreview, DevPreview) + // 2. description +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type FeatureGateList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []FeatureGate `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go new file mode 100644 index 0000000000000..3db935c7fe43d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -0,0 +1,189 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image governs policies related to imagestream imports and runtime configuration +// for external registries. It allows cluster admins to configure which registries +// OpenShift is allowed to import images from, extra CA trust bundles for external +// registries, and policies to block or allow registry hostnames. +// When exposing OpenShift's image registry to the public, this also lets cluster +// admins specify the external hostname. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=images,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Image struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImageSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ImageStatus `json:"status"` +} + +// ImportModeType describes how to import an image manifest. +// +enum +// +kubebuilder:validation:Enum:="";Legacy;PreserveOriginal +type ImportModeType string + +const ( + // ImportModeLegacy indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // This mode is the default. + ImportModeLegacy ImportModeType = "Legacy" + // ImportModePreserveOriginal indicates that the original manifest will be preserved. + // For manifest lists, the manifest list and all its sub-manifests will be imported. 
+ ImportModePreserveOriginal ImportModeType = "PreserveOriginal" +) + +type ImageSpec struct { + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + // +optional + // +listType=atomic + AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in the 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + // +listType=atomic + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted during imagestream import, pod image pull, build image pull, and + // imageregistry pullthrough. + // The namespace for this config map is openshift-config. + // +optional + AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` + + // registrySources contains configuration that determines how the container runtime + // should treat individual registries when accessing images for builds+pods (e.g. + // whether or not to allow insecure access). It does not contain configuration for the + // internal cluster registry. + // +optional + RegistrySources RegistrySources `json:"registrySources"` + + // imageStreamImportMode controls the import mode behaviour of imagestreams. + // It can be set to `Legacy` or `PreserveOriginal` or the empty string. If this value + // is specified, this setting is applied to all newly created imagestreams which do not have the + // value set. `Legacy` indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, + // the manifest list and all its sub-manifests will be imported. When empty, the behaviour will be + // decided based on the payload type advertised by the ClusterVersion status, i.e. a single arch payload + // implies the import mode is Legacy and a multi payload implies PreserveOriginal. + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + ImageStreamImportMode ImportModeType `json:"imageStreamImportMode"` +} + +type ImageStatus struct { + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // This value is set by the image registry operator which controls the internal registry + // hostname. + // +optional + InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry.
The external hostname should be set only when the image registry + // is exposed externally. The first value is used in the 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + // +listType=atomic + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + + // imageStreamImportMode controls the import mode behaviour of imagestreams. It can be + // `Legacy` or `PreserveOriginal`. `Legacy` indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, + // the manifest list and all its sub-manifests will be imported. This value will be reconciled based + // on the spec value; if no spec value is specified, the image registry operator will look + // at the ClusterVersion status to determine the payload type and set the import mode accordingly, + // i.e. a single arch payload implies the import mode is Legacy and a multi payload implies PreserveOriginal. + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + ImageStreamImportMode ImportModeType `json:"imageStreamImportMode,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Image `json:"items"` +} + +// RegistryLocation contains a location of the registry specified by the registry domain +// name. The domain name might include wildcards, like '*' or '??'. +type RegistryLocation struct { + // domainName specifies a domain name for the registry. + // If the registry uses a non-standard port (one other than 80 or 443), the port should be included + // in the domain name as well. + DomainName string `json:"domainName"` + // insecure indicates whether the registry is secure (https) or insecure (http). + // By default (if not specified) the registry is assumed to be secure. + // +optional + Insecure bool `json:"insecure,omitempty"` +} + +// RegistrySources holds cluster-wide information about how to handle the registries config. +type RegistrySources struct { + // insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections. + // +optional + // +listType=atomic + InsecureRegistries []string `json:"insecureRegistries,omitempty"` + // blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. + // + // Only one of BlockedRegistries or AllowedRegistries may be set. + // +optional + // +listType=atomic + BlockedRegistries []string `json:"blockedRegistries,omitempty"` + // allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. + // + // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional + // +listType=atomic + AllowedRegistries []string `json:"allowedRegistries,omitempty"` + // containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified + // domains in their pull specs. Registries will be searched in the order provided in the list. + // Note: this search list only works with the container runtime, i.e. CRI-O. It will NOT work with builds or imagestream imports. + // +optional + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Format=hostname + // +listType=set + ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go new file mode 100644 index 0000000000000..0bd0d77705550 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -0,0 +1,99 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/874 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type ImageContentPolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImageContentPolicySpec `json:"spec"` +} + +// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD. +type ImageContentPolicySpec struct { + // repositoryDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To pull images from mirrors by tag, set "allowMirrorByTags" to true. + // + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. + // + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict + // (e.g. `a, b` vs. `b, a`), the configuration is not rejected but the resulting order is unspecified. + // +optional + // +listType=map + // +listMapKey=source + RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentPolicyList lists the items in the ImageContentPolicy CRD. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageContentPolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ImageContentPolicy `json:"items"` +} + +// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. +type RepositoryDigestMirrors struct { + // source is the repository that users refer to, e.g. in image pull specifications. + // +required + // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` + Source string `json:"source"` + // allowMirrorByTags: if true, the mirrors can be used to pull images that are referenced by their tags. The default is false: the mirrors only work when pulling images that are referenced by their digests. + // Pulling images by tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. + // +optional + AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"` + // mirrors is zero or more repositories that may also contain the same images. + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. No mirror will be configured. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. Other cluster configuration, + // including (but not limited to) other repositoryDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // +optional + // +listType=set + Mirrors []Mirror `json:"mirrors,omitempty"` +} + +// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` +type Mirror string diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go new file mode 100644 index 0000000000000..df2258d12fcf0 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -0,0 +1,141 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules when using a digest pull specification. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type ImageDigestMirrorSet struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImageDigestMirrorSetSpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImageDigestMirrorSetStatus `json:"status,omitempty"` +} + +// ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD. +type ImageDigestMirrorSetSpec struct { + // imageDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in imageDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To use mirrors to pull images using a tag specification, users should configure + // a list of mirrors using the "ImageTagMirrorSet" CRD. + // + // If the image pull specification matches the repository of "source" in multiple imagedigestmirrorset objects, + // only the objects which define the most specific namespace match will be used. + // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as + // the "source", only the objects using quay.io/libpod/busybox are going to apply + // for pull specification quay.io/libpod/busybox. + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact.
+ // + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible. + // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict + // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. + // Users who want to use a specific order of mirrors, should configure them into one list of mirrors using the expected order. + // +optional + // +listType=atomic + ImageDigestMirrors []ImageDigestMirrors `json:"imageDigestMirrors"` +} + +type ImageDigestMirrorSetStatus struct{} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageDigestMirrorSetList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ImageDigestMirrorSet `json:"items"` +} + +// +kubebuilder:validation:Pattern=`^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` +type ImageMirror string + +// MirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. +// +kubebuilder:validation:Enum=NeverContactSource;AllowContactingSource +type MirrorSourcePolicy string + +const ( + // NeverContactSource prevents image pull from the specified repository in the pull spec if the image pull from the mirror list fails. + NeverContactSource MirrorSourcePolicy = "NeverContactSource" + + // AllowContactingSource allows falling back to the specified repository in the pull spec if the image pull from the mirror list fails. + AllowContactingSource MirrorSourcePolicy = "AllowContactingSource" +) + +// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. +type ImageDigestMirrors struct { + // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname + // e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. 
+ // "source" uses one of the following formats: + // host[:port] + // host[:port]/namespace[/namespace…] + // host[:port]/namespace[/namespace…]/repo + // [*.]host + // for more information about the format, see the document about the location field: + // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + // +required + // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` + Source string `json:"source"` + // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. + // Images can be pulled from these mirrors only if they are referenced by their digests. + // The mirrored location is obtained by replacing the part of the input reference that + // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, + // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo + // repository to be used. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. + // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be + // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy" + // Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // "mirrors" uses one of the following formats: + // host[:port] + // host[:port]/namespace[/namespace…] + // host[:port]/namespace[/namespace…]/repo + // for more information about the format, see the document about the location field: + // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + // +optional + // +listType=set + Mirrors []ImageMirror `json:"mirrors,omitempty"` + // mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. + // If unset, the image will continue to be pulled from the the repository in the pull spec. + // sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. + // +optional + MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go new file mode 100644 index 0000000000000..b7e1a6a873282 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -0,0 +1,128 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. +// When multiple policies are defined, the outcome of the behavior is defined on each field. 
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type ImageTagMirrorSet struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImageTagMirrorSetSpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImageTagMirrorSetStatus `json:"status,omitempty"` +} + +// ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD. +type ImageTagMirrorSetSpec struct { + // imageTagMirrors allows images referenced by image tags in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in imageTagMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To use mirrors to pull images using a digest specification only, users should configure + // a list of mirrors using the "ImageDigestMirrorSet" CRD. + // + // If the image pull specification matches the repository of "source" in multiple imagetagmirrorset objects, + // only the objects which define the most specific namespace match will be used. + // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as + // the "source", only the objects using quay.io/libpod/busybox are going to apply + // for pull specification quay.io/libpod/busybox. + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. + // + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible. + // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict + // (e.g. `a, b` vs. `b, a`), the configuration is not rejected but the resulting order is unspecified. + // Users who want a deterministic order of mirrors should configure them into one list of mirrors using the expected order. + // +optional + // +listType=atomic + ImageTagMirrors []ImageTagMirrors `json:"imageTagMirrors"` +} + +type ImageTagMirrorSetStatus struct{} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +type ImageTagMirrorSetList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ImageTagMirrorSet `json:"items"` +} + +// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config. +type ImageTagMirrors struct { + // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, + // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. + // "source" uses one of the following formats: + // host[:port] + // host[:port]/namespace[/namespace…] + // host[:port]/namespace[/namespace…]/repo + // [*.]host + // for more information about the format, see the document about the location field: + // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + // +required + // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` + Source string `json:"source"` + // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. + // Images can be pulled from these mirrors only if they are referenced by their tags. + // The mirrored location is obtained by replacing the part of the input reference that + // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, + // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo + // repository to be used. + // Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. + // Configuring a list of mirrors using the "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. + // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be + // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy". + // Other cluster configuration, including (but not limited to) other imageTagMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // "mirrors" uses one of the following formats: + // host[:port] + // host[:port]/namespace[/namespace…] + // host[:port]/namespace[/namespace…]/repo + // for more information about the format, see the document about the location field: + // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + // +optional + // +listType=set + Mirrors []ImageMirror `json:"mirrors,omitempty"` + // mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. + // If unset, the image will continue to be pulled from the repository in the pull spec.
+ // mirrorSourcePolicy is a valid configuration only when one or more mirrors are in the mirror list. + // +optional + MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go new file mode 100644 index 0000000000000..0293603d78da1 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -0,0 +1,2018 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status + +// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=infrastructures,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Infrastructure struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec InfrastructureSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status InfrastructureStatus `json:"status"` +} + +// InfrastructureSpec contains settings that apply to the cluster infrastructure. +type InfrastructureSpec struct { + // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. + // This configuration file is used to configure the Kubernetes cloud provider integration + // when using the built-in cloud provider integration or the external cloud controller manager. + // The namespace for this config map is openshift-config. + // + // cloudConfig should only be consumed by the kube_cloud_config controller. + // The controller is responsible for using the user configuration in the spec + // for various platforms and combining that with the user provided ConfigMap in this field + // to create a stitched kube cloud config. + // The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace, + // with the kube cloud config stored under the `cloud.conf` key. + // All the clients are expected to use the generated ConfigMap only. + // + // +optional + CloudConfig ConfigMapFileReference `json:"cloudConfig"` + + // platformSpec holds desired information specific to the underlying + // infrastructure provider. + PlatformSpec PlatformSpec `json:"platformSpec,omitempty"` +} + +// InfrastructureStatus describes the infrastructure the cluster is leveraging. +type InfrastructureStatus struct { + // infrastructureName uniquely identifies a cluster with a human-friendly name. + // Once set, it should not be changed. Must be of max length 27 and must have only + // alphanumeric or hyphen characters.
+ InfrastructureName string `json:"infrastructureName"` + + // platform is the underlying infrastructure provider for the cluster. + // + // Deprecated: Use platformStatus.type instead. + Platform PlatformType `json:"platform,omitempty"` + + // platformStatus holds status information specific to the underlying + // infrastructure provider. + // +optional + PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"` + + // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering + // etcd servers and clients. + // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` + + // apiServerURL is a valid URI with scheme 'https', address and + // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console + // to tell users where to find the Kubernetes API. + APIServerURL string `json:"apiServerURL"` + + // apiServerInternalURL is a valid URI with scheme 'https', + // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components + // like kubelets to contact the Kubernetes API server using the + // infrastructure provider rather than Kubernetes networking. + APIServerInternalURL string `json:"apiServerInternalURI"` + + // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation. + // The 'External' mode indicates that the control plane is hosted externally to the cluster and that + // its components are not visible within the cluster. + // +kubebuilder:default=HighlyAvailable + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External + ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` + + // infrastructureTopology expresses the expectations for infrastructure services that do not run on control + // plane nodes, usually indicated by a node selector for a `role` value + // other than `master`. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation. + // NOTE: External topology mode is not applicable for this field. + // +kubebuilder:default=HighlyAvailable + // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica + InfrastructureTopology TopologyMode `json:"infrastructureTopology"` + + // cpuPartitioning expresses whether CPU partitioning is a currently enabled feature in the cluster. + // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. + // Valid values are "None" and "AllNodes". When omitted, the default value is "None". + // The default value of "None" indicates that no nodes will be set up with CPU partitioning.
+ // The "AllNodes" value indicates that all nodes have been setup with CPU partitioning, + // and can then be further configured via the PerformanceProfile API. + // +kubebuilder:default=None + // +default="None" + // +kubebuilder:validation:Enum=None;AllNodes + // +optional + CPUPartitioning CPUPartitioningMode `json:"cpuPartitioning,omitempty"` +} + +// TopologyMode defines the topology mode of the control/infra nodes. +// NOTE: Enum validation is specified in each field that uses this type, +// given that External value is not applicable to the InfrastructureTopology +// field. +type TopologyMode string + +const ( + // "HighlyAvailable" is for operators to configure high-availability as much as possible. + HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + + // "HighlyAvailableArbiter" is for operators to configure for an arbiter HA deployment. + HighlyAvailableArbiterMode TopologyMode = "HighlyAvailableArbiter" + + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. + SingleReplicaTopologyMode TopologyMode = "SingleReplica" + + // "External" indicates that the component is running externally to the cluster. When specified + // as the control plane topology, operators should avoid scheduling workloads to masters or assume + // that any of the control plane components such as kubernetes API server or etcd are visible within + // the cluster. + ExternalTopologyMode TopologyMode = "External" +) + +// CPUPartitioningMode defines the mode for CPU partitioning +type CPUPartitioningMode string + +const ( + // CPUPartitioningNone means that no CPU Partitioning is on in this cluster infrastructure + CPUPartitioningNone CPUPartitioningMode = "None" + + // CPUPartitioningAllNodes means that all nodes are configured with CPU Partitioning in this cluster + CPUPartitioningAllNodes CPUPartitioningMode = "AllNodes" +) + +// PlatformLoadBalancerType defines the type of load balancer used by the cluster. +type PlatformLoadBalancerType string + +const ( + // LoadBalancerTypeUserManaged is a load balancer with control-plane VIPs managed outside of the cluster by the customer. + LoadBalancerTypeUserManaged PlatformLoadBalancerType = "UserManaged" + + // LoadBalancerTypeOpenShiftManagedDefault is the default load balancer with control-plane VIPs managed by the OpenShift cluster. + LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault" +) + +// PlatformType is a specific supported infrastructure provider. +// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External +type PlatformType string + +const ( + // AWSPlatformType represents Amazon Web Services infrastructure. + AWSPlatformType PlatformType = "AWS" + + // AzurePlatformType represents Microsoft Azure infrastructure. + AzurePlatformType PlatformType = "Azure" + + // BareMetalPlatformType represents managed bare metal infrastructure. + BareMetalPlatformType PlatformType = "BareMetal" + + // GCPPlatformType represents Google Cloud Platform infrastructure. + GCPPlatformType PlatformType = "GCP" + + // LibvirtPlatformType represents libvirt infrastructure. + LibvirtPlatformType PlatformType = "Libvirt" + + // OpenStackPlatformType represents OpenStack infrastructure. + OpenStackPlatformType PlatformType = "OpenStack" + + // NonePlatformType means there is no infrastructure provider. 
+ NonePlatformType PlatformType = "None" + + // VSpherePlatformType represents VMware vSphere infrastructure. + VSpherePlatformType PlatformType = "VSphere" + + // OvirtPlatformType represents oVirt/RHV infrastructure. + OvirtPlatformType PlatformType = "oVirt" + + // IBMCloudPlatformType represents IBM Cloud infrastructure. + IBMCloudPlatformType PlatformType = "IBMCloud" + + // KubevirtPlatformType represents KubeVirt/OpenShift Virtualization infrastructure. + KubevirtPlatformType PlatformType = "KubeVirt" + + // EquinixMetalPlatformType represents Equinix Metal infrastructure. + EquinixMetalPlatformType PlatformType = "EquinixMetal" + + // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure. + PowerVSPlatformType PlatformType = "PowerVS" + + // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure. + AlibabaCloudPlatformType PlatformType = "AlibabaCloud" + + // NutanixPlatformType represents Nutanix infrastructure. + NutanixPlatformType PlatformType = "Nutanix" + + // ExternalPlatformType represents a generic infrastructure provider. Platform-specific components should be supplemented separately. + ExternalPlatformType PlatformType = "External" +) + +// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type +type IBMCloudProviderType string + +const ( + // IBMCloudProviderTypeClassic means that the IBM Cloud cluster is using classic infrastructure + IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic" + + // IBMCloudProviderTypeVPC means that the IBM Cloud cluster is using VPC infrastructure + IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC" + + // IBMCloudProviderTypeUPI means that the IBM Cloud cluster is using user provided infrastructure. + // This is utilized in IBM Cloud Satellite environments. + IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI" +) + +// DNSType indicates whether the cluster DNS is hosted by the cluster or by the platform's default DNS. +type DNSType string + +const ( + // ClusterHosted indicates that a DNS solution other than the default provided by the + // cloud platform is in use. In this mode, the cluster hosts a DNS solution during installation and the + // user is expected to provide their own DNS solution post-install. + // When the DNS solution is `ClusterHosted`, the cluster will continue to use the + // default Load Balancers provided by the cloud platform. + ClusterHostedDNSType DNSType = "ClusterHosted" + + // PlatformDefault indicates that the cluster is using the default DNS solution for the + // cloud platform. OpenShift is responsible for all the LB and DNS configuration needed for the + // cluster to be functional with no intervention from the user. To accomplish this, OpenShift + // configures the default LB and DNS solutions provided by the underlying cloud. + PlatformDefaultDNSType DNSType = "PlatformDefault" +) + +// ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. +type ExternalPlatformSpec struct { + // platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + // This field is solely for informational and reporting purposes and is not expected to be used for decision-making.
+ // +kubebuilder:default:="Unknown"
+ // +default="Unknown"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == 'Unknown' || self == oldSelf",message="platform name cannot be changed once set"
+ // +optional
+ PlatformName string `json:"platformName,omitempty"`
+}
+
+// PlatformSpec holds the desired state specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at spec-level for the underlying cluster, it
+// is expected that only one of the spec structs is set.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vsphere) && has(self.vsphere) ? size(self.vsphere.vcenters) < 2 : true",message="vcenters can have at most 1 item when configured post-install"
+type PlatformSpec struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS",
+ // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms,
+ // and must handle unrecognized platforms as None if they do not support that platform.
+ //
+ // +unionDiscriminator
+ Type PlatformType `json:"type"`
+
+ // aws contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformSpec `json:"aws,omitempty"`
+
+ // azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformSpec `json:"azure,omitempty"`
+
+ // gcp contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformSpec `json:"gcp,omitempty"`
+
+ // baremetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"`
+
+ // openstack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"`
+
+ // ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"`
+
+ // vsphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"`
+
+ // ibmcloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+ // kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"`
+
+ // equinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"`
+
+ // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.
+ // +optional
+ PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"`
+
+ // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"`
+
+ // nutanix contains settings specific to the Nutanix infrastructure provider.
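+ //
+ // As with the other platform structs, only the struct matching type should
+ // be set; an illustrative sketch:
+ //
+ //	spec := PlatformSpec{
+ //		Type:    NutanixPlatformType,
+ //		Nutanix: &NutanixPlatformSpec{},
+ //	}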
+ // +optional
+ Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"`
+
+ // external contains settings specific to the generic External infrastructure provider.
+ // Platform-specific components should be supplemented separately.
+ // +optional
+ External *ExternalPlatformSpec `json:"external,omitempty"`
+}
+
+// CloudControllerManagerState defines whether Cloud Controller Manager presence is expected or not
+type CloudControllerManagerState string
+
+const (
+ // CloudControllerManagerExternal means that Cloud Controller Manager is enabled and expected to be installed.
+ // This value indicates that new nodes should be tainted as uninitialized when created,
+ // preventing them from running workloads until they are initialized by the cloud controller manager.
+ CloudControllerManagerExternal CloudControllerManagerState = "External"
+
+ // CloudControllerManagerNone means that Cloud Controller Manager is disabled and not expected to be installed.
+ // This value indicates that new nodes should not be tainted
+ // and no extra node initialization is expected from the cloud controller manager.
+ CloudControllerManagerNone CloudControllerManagerState = "None"
+)
+
+// CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings
+// +kubebuilder:validation:XValidation:rule="(has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != \"External\")",message="state may not be added or removed once set"
+type CloudControllerManagerStatus struct {
+ // state determines whether or not an external Cloud Controller Manager is expected to
+ // be installed within the cluster.
+ // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
+ //
+ // Valid values are "External", "None" and omitted.
+ // When set to "External", new nodes will be tainted as uninitialized when created,
+ // preventing them from running workloads until they are initialized by the cloud controller manager.
+ // When omitted or set to "None", new nodes will not be tainted
+ // and no extra initialization from the cloud controller manager is expected.
+ // +kubebuilder:validation:Enum="";External;None
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="state is immutable once set"
+ // +optional
+ State CloudControllerManagerState `json:"state"`
+}
+
+// ExternalPlatformStatus holds the current status of the generic External infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager)",message="cloudControllerManager may not be added or removed once set"
+type ExternalPlatformStatus struct {
+ // cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI).
+ // When omitted, new nodes will not be tainted
+ // and no extra initialization from the cloud controller manager is expected.
+ // +optional
+ CloudControllerManager CloudControllerManagerStatus `json:"cloudControllerManager"`
+}
+
+// PlatformStatus holds the current status specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at status-level for the underlying cluster, it
+// is expected that only one of the status structs is set.
+type PlatformStatus struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None".
+ // Individual components may not support all platforms, and must handle
+ // unrecognized platforms as None if they do not support that platform.
+ //
+ // This value will be synced to the `status.platform` and `status.platformStatus.type`.
+ // Currently this value cannot be changed once set.
+ Type PlatformType `json:"type"`
+
+ // aws contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformStatus `json:"aws,omitempty"`
+
+ // azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformStatus `json:"azure,omitempty"`
+
+ // gcp contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformStatus `json:"gcp,omitempty"`
+
+ // baremetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
+
+ // openstack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+
+ // ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
+
+ // vsphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"`
+
+ // ibmcloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"`
+
+ // kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"`
+
+ // equinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"`
+
+ // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.
+ // +optional
+ PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"`
+
+ // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"`
+
+ // nutanix contains settings specific to the Nutanix infrastructure provider.
+ // +optional
+ Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"`
+
+ // external contains settings specific to the generic External infrastructure provider.
+ // +optional
+ External *ExternalPlatformStatus `json:"external,omitempty"`
+}
+
+// AWSServiceEndpoint stores the configuration of a custom URL to
+// override the existing defaults of AWS Services.
+type AWSServiceEndpoint struct {
+ // name is the name of the AWS service.
+ // The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+ Name string `json:"name"`
+
+ // url is a fully qualified URI with scheme https that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
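+ //
+ // An illustrative endpoint override (values are hypothetical):
+ //
+ //	AWSServiceEndpoint{
+ //		Name: "ec2",
+ //		URL:  "https://ec2.example.vpce.amazonaws.com",
+ //	}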
+ //
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+}
+
+// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AWSPlatformSpec struct {
+ // serviceEndpoints list contains custom endpoints which will override the default
+ // service endpoints of AWS Services.
+ // There must be only one ServiceEndpoint for a service.
+ // +listType=atomic
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+ // region holds the default AWS region for new AWS resources created by the cluster.
+ Region string `json:"region"`
+
+ // serviceEndpoints list contains custom endpoints which will override the default
+ // service endpoints of AWS Services.
+ // There must be only one ServiceEndpoint for a service.
+ // +listType=atomic
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to AWS resources created for the cluster.
+ // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources.
+ // AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags
+ // available for the user.
+ // +kubebuilder:validation:MaxItems=25
+ // +listType=atomic
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+
+ // cloudLoadBalancerConfig holds configuration related to DNS and cloud
+ // load balancers. It allows configuration of in-cluster DNS as an alternative
+ // to the platform default DNS implementation.
+ // When using the ClusterHosted DNS type, Load Balancer IP addresses
+ // must be provided for the API and internal API load balancers as well as the
+ // ingress load balancer.
+ //
+ // +default={"dnsType": "PlatformDefault"}
+ // +kubebuilder:default={"dnsType": "PlatformDefault"}
+ // +openshift:enable:FeatureGate=AWSClusterHostedDNS
+ // +optional
+ // +nullable
+ CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+ // key is the key of the tag
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Key string `json:"key"`
+ // value is the value of the tag.
+ // Some AWS services do not support empty values. Since tags are added to resources in many services, the
+ // length of the tag value must meet the requirements of all services.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Value string `json:"value"`
+}
+
+// AzurePlatformSpec holds the desired state of the Azure infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AzurePlatformSpec struct{}
+
+// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
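+// For example, a user-supplied resource tag entry (values are hypothetical):
+//
+//	AzureResourceTag{Key: "team", Value: "cluster-ops"}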
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+type AzurePlatformStatus struct {
+ // resourceGroupName is the Resource Group for new Azure resources created for the cluster.
+ ResourceGroupName string `json:"resourceGroupName"`
+
+ // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
+ // If empty, the value is the same as ResourceGroupName.
+ // +optional
+ NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
+
+ // cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
+ // with the appropriate Azure API endpoints.
+ // If empty, the value is equal to `AzurePublicCloud`.
+ // +optional
+ CloudName AzureCloudEnvironment `json:"cloudName,omitempty"`
+
+ // armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.
+ // +optional
+ ARMEndpoint string `json:"armEndpoint,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to Azure resources created for the cluster.
+ // See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources.
+ // Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags
+ // may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+ // +listType=atomic
+ // +optional
+ ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AzureResourceTag is a tag to apply to Azure resources created for the cluster.
+type AzureResourceTag struct {
+ // key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key
+ // must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric
+ // characters and the following special characters `_ . -`.
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`
+ Key string `json:"key"`
+ // value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value
+ // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$`
+ Value string `json:"value"`
+}
+
+// AzureCloudEnvironment is the name of the Azure cloud environment
+// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud
+type AzureCloudEnvironment string
+
+const (
+ // AzurePublicCloud is the general-purpose, public Azure cloud environment.
+ AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud"
+
+ // AzureUSGovernmentCloud is the Azure cloud environment for the US government.
+ AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud"
+
+ // AzureChinaCloud is the Azure cloud environment used in China.
+ AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud"
+
+ // AzureGermanCloud is the Azure cloud environment used in Germany.
+ AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud"
+
+ // AzureStackCloud is the Azure cloud environment used at the edge and on premises.
+ AzureStackCloud AzureCloudEnvironment = "AzureStackCloud"
+)
+
+// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type GCPPlatformSpec struct{}
+
+// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+type GCPPlatformStatus struct {
+ // projectID is the Project ID for new GCP resources created for the cluster.
+ ProjectID string `json:"projectID"`
+
+ // region holds the region for new GCP resources created for the cluster.
+ Region string `json:"region"`
+
+ // resourceLabels is a list of additional labels to apply to GCP resources created for the cluster.
+ // See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources.
+ // GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use,
+ // allowing 32 labels for user configuration.
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceLabels are immutable and may only be configured during installation"
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ // +openshift:enable:FeatureGate=GCPLabelsTags
+ ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to GCP resources created for the cluster.
+ // See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on
+ // tagging GCP resources. GCP supports a maximum of 50 tags per resource.
+ // +kubebuilder:validation:MaxItems=50
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ // +openshift:enable:FeatureGate=GCPLabelsTags
+ ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"`
+
+ // This field was introduced and removed under tech preview.
+ // To avoid conflicts with serialization, this field name may never be used again.
+ // Tombstone the field as a reminder.
+ // ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"`
+
+ // cloudLoadBalancerConfig holds configuration related to DNS and cloud
+ // load balancers. It allows configuration of in-cluster DNS as an alternative
+ // to the platform default DNS implementation.
+ // When using the ClusterHosted DNS type, Load Balancer IP addresses
+ // must be provided for the API and internal API load balancers as well as the
+ // ingress load balancer.
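+ //
+ // An illustrative cluster-hosted configuration (addresses are hypothetical):
+ //
+ //	CloudLoadBalancerConfig{
+ //		DNSType: ClusterHostedDNSType,
+ //		ClusterHosted: &CloudLoadBalancerIPs{
+ //			APILoadBalancerIPs:     []IP{"203.0.113.10"},
+ //			APIIntLoadBalancerIPs:  []IP{"10.0.0.10"},
+ //			IngressLoadBalancerIPs: []IP{"203.0.113.20"},
+ //		},
+ //	}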
+ //
+ // +default={"dnsType": "PlatformDefault"}
+ // +kubebuilder:default={"dnsType": "PlatformDefault"}
+ // +openshift:enable:FeatureGate=GCPClusterHostedDNS
+ // +optional
+ // +nullable
+ CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
+}
+
+// GCPResourceLabel is a label to apply to GCP resources created for the cluster.
+type GCPResourceLabel struct {
+ // key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty.
+ // Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters,
+ // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io`
+ // and `openshift-io`.
+ // +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`"
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$`
+ Key string `json:"key"`
+
+ // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty.
+ // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$`
+ Value string `json:"value"`
+}
+
+// GCPResourceTag is a tag to apply to GCP resources created for the cluster.
+type GCPResourceTag struct {
+ // parentID is the ID of the hierarchical resource where the tags are defined,
+ // e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages:
+ // https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id,
+ // https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects.
+ // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes.
+ // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers,
+ // and hyphens, must start with a letter, and cannot end with a hyphen.
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=32
+ // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)`
+ ParentID string `json:"parentID"`
+
+ // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty.
+ // Tag key must begin and end with an alphanumeric character, and must contain only uppercase and lowercase
+ // alphanumeric characters, and the following special characters `._-`.
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$`
+ Key string `json:"key"`
+
+ // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty.
+ // Tag value must begin and end with an alphanumeric character, and must contain only uppercase and lowercase
+ // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.
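+ //
+ // An illustrative tag binding (values are hypothetical):
+ //
+ //	GCPResourceTag{ParentID: "1234567890", Key: "env", Value: "prod"}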
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$`
+ Value string `json:"value"`
+}
+
+// CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS
+// solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's
+// Load Balancer configuration needs to be provided so that the DNS solution hosted
+// within the cluster can be configured with those values.
+// +kubebuilder:validation:XValidation:rule="has(self.dnsType) && self.dnsType != 'ClusterHosted' ? !has(self.clusterHosted) : true",message="clusterHosted is permitted only when dnsType is ClusterHosted"
+// +union
+type CloudLoadBalancerConfig struct {
+ // dnsType indicates the type of DNS solution in use within the cluster. Its default value of
+ // `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform.
+ // It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode,
+ // the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed.
+ // The cluster's use of the cloud's Load Balancers is unaffected by this setting.
+ // The value is immutable after it has been set at install time.
+ // Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS.
+ // Enabling this functionality allows the user to start their own DNS solution outside the cluster after
+ // installation is complete. The customer would be responsible for configuring this custom DNS solution,
+ // and it can be run in addition to the in-cluster DNS solution.
+ // +default="PlatformDefault"
+ // +kubebuilder:default:="PlatformDefault"
+ // +kubebuilder:validation:Enum="ClusterHosted";"PlatformDefault"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="dnsType is immutable"
+ // +optional
+ // +unionDiscriminator
+ DNSType DNSType `json:"dnsType,omitempty"`
+
+ // clusterHosted holds the IP addresses of API, API-Int and Ingress Load
+ // Balancers on Cloud Platforms. The DNS solution hosted within the cluster
+ // uses these IP addresses to provide resolution for API, API-Int and Ingress
+ // services.
+ // +optional
+ // +unionMember,optional
+ ClusterHosted *CloudLoadBalancerIPs `json:"clusterHosted,omitempty"`
+}
+
+// CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API,
+// API-Int and Ingress Load Balancers. They will be populated as soon as the
+// respective Load Balancers have been configured. These values are utilized
+// to configure the DNS solution hosted within the cluster.
+type CloudLoadBalancerIPs struct {
+ // apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service.
+ // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+ // Entries in the apiIntLoadBalancerIPs must be unique.
+ // A maximum of 16 IP addresses are permitted.
+ // +kubebuilder:validation:Format=ip
+ // +listType=set
+ // +kubebuilder:validation:MaxItems=16
+ // +optional
+ APIIntLoadBalancerIPs []IP `json:"apiIntLoadBalancerIPs,omitempty"`
+
+ // apiLoadBalancerIPs holds Load Balancer IPs for the API service.
+ // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+ // Could be empty for private clusters.
+ // Entries in the apiLoadBalancerIPs must be unique.
+ // A maximum of 16 IP addresses are permitted.
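+ //
+ // For a dual-stack cluster this would typically hold one address per IP
+ // family, e.g. (hypothetical addresses):
+ //
+ //	APILoadBalancerIPs: []IP{"192.0.2.10", "2001:db8::10"}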
+ // +kubebuilder:validation:Format=ip + // +listType=set + // +kubebuilder:validation:MaxItems=16 + // +optional + APILoadBalancerIPs []IP `json:"apiLoadBalancerIPs,omitempty"` + + // ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + // Entries in the ingressLoadBalancerIPs must be unique. + // A maximum of 16 IP addresses are permitted. + // +kubebuilder:validation:Format=ip + // +listType=set + // +kubebuilder:validation:MaxItems=16 + // +optional + IngressLoadBalancerIPs []IP `json:"ingressLoadBalancerIPs,omitempty"` +} + +// BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform. +// +union +type BareMetalPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on BareMetal platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. +// This only includes fields that can be modified in the cluster. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" +type BareMetalPlatformSpec struct { + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.apiServerInternalIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + APIServerInternalIPs []IP `json:"apiServerInternalIPs"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. 
The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.ingressIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + IngressIPs []IP `json:"ingressIPs"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster + // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, + // for example "10.0.0.0/8" or "fd00::/8". + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. +// For more information about the network architecture used with the BareMetal platform type, see: +// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md +type BareMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. 
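+ //
+ // An illustrative dual-stack status fragment (hypothetical addresses):
+ //
+ //	IngressIPs:      []string{"192.0.2.20", "2001:db8::20"},
+ //	MachineNetworks: []CIDR{"192.0.2.0/24", "2001:db8::/64"},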
+ // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + IngressIPs []string `json:"ingressIPs"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // BareMetal deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. + // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +openshift:enable:FeatureGate=BareMetalLoadBalancer + // +optional + LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform. +// +union +type OpenStackPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on OpenStack platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. +// This only includes fields that can be modified in the cluster. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" +type OpenStackPlatformSpec struct { + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. 
These are the + // IPs for a self-hosted load balancer in front of the API servers. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.apiServerInternalIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + APIServerInternalIPs []IP `json:"apiServerInternalIPs"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.ingressIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + IngressIPs []IP `json:"ingressIPs"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster + // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, + // for example "10.0.0.0/8" or "fd00::/8". + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. +type OpenStackPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? 
ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // cloudName is the name of the desired OpenStack cloud in the + // client configuration file (`clouds.yaml`). + CloudName string `json:"cloudName,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + IngressIPs []string `json:"ingressIPs"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // OpenStack deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. + // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +optional + LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform. +// +union +type OvirtPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on Ovirt platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. 
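+ //
+ // An illustrative sketch of opting out of the managed load balancer:
+ //
+ //	LoadBalancer: &OvirtPlatformLoadBalancer{
+ //		Type: LoadBalancerTypeUserManaged,
+ //	}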
+ // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OvirtPlatformSpec struct{} + +// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +type OvirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=set + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=set + IngressIPs []string `json:"ingressIPs"` + + // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. 
+ // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +openshift:enable:FeatureGate=BareMetalLoadBalancer + // +optional + LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` +} + +// VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform. +// +union +type VSpherePlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on VSphere platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// The VSphereFailureDomainZoneType is a string representation of a failure domain +// zone type. There are two supportable types HostGroup and ComputeCluster +// +enum +type VSphereFailureDomainZoneType string + +// The VSphereFailureDomainRegionType is a string representation of a failure domain +// region type. There are two supportable types ComputeCluster and Datacenter +// +enum +type VSphereFailureDomainRegionType string + +const ( + // HostGroupFailureDomainZone is a failure domain zone for a vCenter vm-host group. + HostGroupFailureDomainZone VSphereFailureDomainZoneType = "HostGroup" + // ComputeClusterFailureDomainZone is a failure domain zone for a vCenter compute cluster. + ComputeClusterFailureDomainZone VSphereFailureDomainZoneType = "ComputeCluster" + // DatacenterFailureDomainRegion is a failure domain region for a vCenter datacenter. + DatacenterFailureDomainRegion VSphereFailureDomainRegionType = "Datacenter" + // ComputeClusterFailureDomainRegion is a failure domain region for a vCenter compute cluster. + ComputeClusterFailureDomainRegion VSphereFailureDomainRegionType = "ComputeCluster" +) + +// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'HostGroup' ? has(self.regionAffinity) && self.regionAffinity.type == 'ComputeCluster' : true",message="when zoneAffinity type is HostGroup, regionAffinity type must be ComputeCluster" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'ComputeCluster' ? has(self.regionAffinity) && self.regionAffinity.type == 'Datacenter' : true",message="when zoneAffinity type is ComputeCluster, regionAffinity type must be Datacenter" +type VSpherePlatformFailureDomainSpec struct { + // name defines the arbitrary but unique name + // of a failure domain. 
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ Name string `json:"name"`
+
+ // region defines the name of a region tag that will
+ // be attached to a vCenter datacenter. The tag
+ // category in vCenter must be named openshift-region.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +required
+ Region string `json:"region"`
+
+ // zone defines the name of a zone tag that will
+ // be attached to a vCenter cluster. The tag
+ // category in vCenter must be named openshift-zone.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +required
+ Zone string `json:"zone"`
+
+ // regionAffinity holds the type of region, Datacenter or ComputeCluster.
+ // When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology.
+ // When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.
+ // +openshift:validation:featureGate=VSphereHostVMGroupZonal
+ // +optional
+ RegionAffinity *VSphereFailureDomainRegionAffinity `json:"regionAffinity,omitempty"`
+
+ // zoneAffinity holds the type of the zone and the hostGroup, whose vmGroup
+ // and hostGroup names in vCenter correspond to a vm-host group of type
+ // Virtual Machine and Host respectively. It also contains the vmHostRule,
+ // which is an affinity vm-host rule in vCenter.
+ // +openshift:validation:featureGate=VSphereHostVMGroupZonal
+ // +optional
+ ZoneAffinity *VSphereFailureDomainZoneAffinity `json:"zoneAffinity,omitempty"`
+
+ // server is the fully-qualified domain name or the IP address of the vCenter server.
+ // +required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=255
+ // ---
+ // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
+ Server string `json:"server"`
+
+ // topology describes a given failure domain using vSphere constructs
+ // +required
+ Topology VSpherePlatformTopology `json:"topology"`
+}
+
+// VSpherePlatformTopology holds the required and optional vCenter objects - datacenter,
+// computeCluster, networks, datastore and resourcePool - to provision virtual machines.
+type VSpherePlatformTopology struct {
+ // datacenter is the name of the vCenter datacenter in which virtual machines will be located.
+ // The maximum length of the datacenter name is 80 characters.
+ // +required
+ // +kubebuilder:validation:MaxLength=80
+ Datacenter string `json:"datacenter"`
+
+ // computeCluster is the absolute path of the vCenter cluster
+ // in which virtual machines will be located.
+ // The absolute path is of the form /<datacenter>/host/<cluster>.
+ // The maximum length of the path is 2048 characters.
+ // +required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/host/.*?`
+ ComputeCluster string `json:"computeCluster"`
+
+ // networks is the list of port group network names within this failure domain.
+ // If feature gate VSphereMultiNetworks is enabled, up to 10 network adapters may be defined.
+ // 10 is the maximum number of virtual network devices which may be attached to a VM as defined by:
+ // https://configmax.esp.vmware.com/guest?vmwareproduct=vSphere&release=vSphere%208.0&categories=1-0
+ // The available networks (port groups) can be listed using
+ // `govc ls 'network/*'`
+ // Networks should be in the form of an absolute path:
+ // /<datacenter>/network/<portgroup>.
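+ //
+ // For example (hypothetical inventory paths):
+ //
+ //	Networks: []string{"/dc1/network/ci-segment-151"}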
+ // +required
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10
+ // +kubebuilder:validation:MinItems=1
+ // +listType=atomic
+ Networks []string `json:"networks"`
+
+ // datastore is the absolute path of the datastore in which the
+ // virtual machine is located.
+ // The absolute path is of the form /<datacenter>/datastore/<datastore>.
+ // The maximum length of the path is 2048 characters.
+ // +required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?`
+ Datastore string `json:"datastore"`
+
+ // resourcePool is the absolute path of the resource pool where virtual machines will be
+ // created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>.
+ // The maximum length of the path is 2048 characters.
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/host/.*?/Resources.*`
+ // +optional
+ ResourcePool string `json:"resourcePool,omitempty"`
+
+ // folder is the absolute path of the folder where
+ // virtual machines are located. The absolute path
+ // is of the form /<datacenter>/vm/<folder>.
+ // The maximum length of the path is 2048 characters.
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
+ // +optional
+ Folder string `json:"folder,omitempty"`
+
+ // template is the full inventory path of the virtual machine or template
+ // that will be cloned when creating new machines in this failure domain.
+ // The maximum length of the path is 2048 characters.
+ //
+ // When omitted, the template will be calculated by the control plane
+ // machineset operator based on the region and zone defined in
+ // VSpherePlatformFailureDomainSpec.
+ // For example, for zone=zonea, region=region1, and infrastructure name=test,
+ // the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.
+ // +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
+ // +optional
+ Template string `json:"template,omitempty"`
+}
+
+// VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types)
+// and the vm-host affinity rule that together create an affinity configuration for vm-host based zones.
+// This configuration within vCenter creates the required association between a failure domain, virtual machines
+// and ESXi hosts to create a vm-host based zone.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'HostGroup' ? has(self.hostGroup) : !has(self.hostGroup)",message="hostGroup is required when type is HostGroup, and forbidden otherwise"
+// +union
+type VSphereFailureDomainZoneAffinity struct {
+ // type determines the vSphere object type for a zone within this failure domain.
+ // Available types are ComputeCluster and HostGroup.
+ // When set to ComputeCluster, this means the vCenter cluster defined is the zone.
+ // When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and
+ // this means the zone is defined by the grouping of those fields.
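+ //
+ // An illustrative host-group-based zone (names are hypothetical):
+ //
+ //	VSphereFailureDomainZoneAffinity{
+ //		Type: HostGroupFailureDomainZone,
+ //		HostGroup: &VSphereFailureDomainHostGroup{
+ //			VMGroup:    "zone1-vms",
+ //			HostGroup:  "zone1-hosts",
+ //			VMHostRule: "zone1-vm-host-rule",
+ //		},
+ //	}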
+ // +kubebuilder:validation:Enum:=HostGroup;ComputeCluster + // +required + // +unionDiscriminator + Type VSphereFailureDomainZoneType `json:"type"` + + // hostGroup holds the vmGroup and the hostGroup names in vCenter. These + // correspond to a vm-host group of type Virtual Machine and Host respectively. It also + // contains the vmHostRule which is an affinity vm-host rule in vCenter. + // +unionMember + // +optional + HostGroup *VSphereFailureDomainHostGroup `json:"hostGroup,omitempty"` +} + +// VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the +// VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster. +// +union +type VSphereFailureDomainRegionAffinity struct { + // type determines the vSphere object type for a region within this failure domain. + // Available types are Datacenter and ComputeCluster. + // When set to Datacenter, this means the vCenter Datacenter defined is the region. + // When set to ComputeCluster, this means the vCenter cluster defined is the region. + // +kubebuilder:validation:Enum:=ComputeCluster;Datacenter + // +required + // +unionDiscriminator + Type VSphereFailureDomainRegionType `json:"type"` +} + +// VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter. These +// correspond to a vm-host group of type Virtual Machine and Host respectively. It also +// contains the vmHostRule which is an affinity vm-host rule in vCenter. +type VSphereFailureDomainHostGroup struct { + // vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + // vmGroup is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + VMGroup string `json:"vmGroup"` + + // hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + // hostGroup is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + HostGroup string `json:"hostGroup"` + + // vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + // vmHostRule is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + VMHostRule string `json:"vmHostRule"` +} + +// VSpherePlatformVCenterSpec stores the vCenter connection fields. +// This is used by the vSphere CCM. +type VSpherePlatformVCenterSpec struct { + + // server is the fully-qualified domain name or the IP address of the vCenter server. + // +required + // +kubebuilder:validation:MaxLength=255 + // --- + // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname + Server string `json:"server"` + + // port is the TCP port that will be used to communicate to + // the vCenter endpoint. + // When omitted, this means the user has no opinion and + // it is up to the platform to choose a sensible default, + // which is subject to change over time. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=32767 + // +optional + Port int32 `json:"port,omitempty"` + + // The vCenter Datacenters in which the RHCOS + // vm guests are located.
This field will + // be used by the Cloud Controller Manager. + // Each datacenter listed here should be used within + // a topology. + // +required + // +kubebuilder:validation:MinItems=1 + // +listType=set + Datacenters []string `json:"datacenters"` +} + +// VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for +// including and excluding IP ranges in the cloud provider. +// This would be used for example when multiple network adapters are attached to +// a guest to help determine which IP address the cloud config manager should use +// for the external and internal node networking. +type VSpherePlatformNodeNetworkingSpec struct { + // networkSubnetCidr is a list of CIDRs; an IP address on the VirtualMachine's network interfaces + // that is included in these CIDRs will be used in the respective status.addresses fields. + // --- + // + Validation is applied via a patch, we validate the format as cidr + // +listType=set + // +optional + NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"` + + // network is the VM Network name on the VirtualMachine that will be used when searching + // for status.addresses fields. Note that if internal.networkSubnetCIDR and + // external.networkSubnetCIDR are not set, then the vNIC associated to this network must + // only have a single IP address assigned to it. + // The available networks (port groups) can be listed using + // `govc ls 'network/*'` + // +optional + Network string `json:"network,omitempty"` + + // excludeNetworkSubnetCidr is a list of subnet ranges; IP addresses within these ranges will be excluded when selecting + // the IP address from the VirtualMachine for use in the status.addresses fields. + // --- + // + Validation is applied via a patch, we validate the format as cidr + // +listType=atomic + // +optional + ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"` +} + +// VSpherePlatformNodeNetworking holds the external and internal node networking spec. +type VSpherePlatformNodeNetworking struct { + // external represents the network configuration of the node that is externally routable. + // +optional + External VSpherePlatformNodeNetworkingSpec `json:"external"` + // internal represents the network configuration of the node that is routable only within the cluster. + // +optional + Internal VSpherePlatformNodeNetworkingSpec `json:"internal"` +} + +// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. +// In the future the cloud provider operator, storage operator and machine operator will +// use these fields for configuration. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vcenters) && has(self.vcenters) ? size(self.vcenters) < 2 : true",message="vcenters can have at most 1 item when configured post-install" +type VSpherePlatformSpec struct { + // vcenters holds the connection details for services to communicate with vCenter. + // Currently, only a single vCenter is supported; with the tech preview VSphereMultiVCenters + // feature gate, up to 3 vCenters are supported. + // Once the cluster has been installed, you are unable to change the current number of defined + // vCenters except in the case where the cluster has been upgraded from a version of OpenShift + // where the vsphere platform spec was not present. + // You may make modifications to the existing vCenters that are defined in the vcenters list + // in order to match any added or modified failure domains.
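+ // A minimal single-vCenter sketch (the server name and datacenter are hypothetical): + // + //	VCenters: []VSpherePlatformVCenterSpec{{ + //		Server:      "vcenter.example.com", + //		Port:        443, + //		Datacenters: []string{"dc1"}, + //	}},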
+ // --- + // + If VCenters is not defined, use the existing cloud-config configmap defined + // + in openshift-config. + // +kubebuilder:validation:MinItems=0 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3 + // +kubebuilder:validation:XValidation:rule="size(self) != size(oldSelf) ? size(oldSelf) == 0 && size(self) < 2 : true",message="vcenters cannot be added or removed once set" + // +listType=atomic + // +optional + VCenters []VSpherePlatformVCenterSpec `json:"vcenters,omitempty"` + + // failureDomains contains the definition of region, zone and the vCenter topology. + // If this is omitted, failure domains (regions and zones) will not be used. + // +listType=map + // +listMapKey=name + // +optional + FailureDomains []VSpherePlatformFailureDomainSpec `json:"failureDomains,omitempty"` + + // nodeNetworking contains the definition of internal and external network constraints for + // assigning the node's networking. + // If this field is omitted, networking defaults to the legacy + // address selection behavior which is to only support a single address and + // return the first one found. + // +optional + NodeNetworking VSpherePlatformNodeNetworking `json:"nodeNetworking,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.apiServerInternalIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + APIServerInternalIPs []IP `json:"apiServerInternalIPs"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. + // In dual stack clusters this list contains two IP addresses, one from IPv4 + // family and one from IPv6. + // In single stack clusters a single IP address is expected. + // When omitted, values from the status.ingressIPs will be used. + // Once set, the list cannot be completely removed (but its second entry can). + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + // +optional + IngressIPs []IP `json:"ingressIPs"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster + // nodes.
Each network is provided in the CIDR format and should be IPv4 or IPv6, + // for example "10.0.0.0/8" or "fd00::/8". + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +type VSpherePlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic + IngressIPs []string `json:"ingressIPs"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // vSphere deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. 
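+ // For instance, the default applied when the field is omitted corresponds to the following sketch (assuming VSpherePlatformLoadBalancer carries a string-typed Type discriminator, as the +default marker below suggests): + // + //	LoadBalancer: &VSpherePlatformLoadBalancer{Type: "OpenShiftManagedDefault"},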
+ // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +openshift:enable:FeatureGate=BareMetalLoadBalancer + // +optional + LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" + // +optional + MachineNetworks []CIDR `json:"machineNetworks"` +} + +// IBMCloudServiceEndpoint stores the configuration of a custom url to +// override existing defaults of IBM Cloud Services. +type IBMCloudServiceEndpoint struct { + // name is the name of the IBM Cloud service. + // Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. + // For example, the IBM Cloud Private IAM service could be configured with the + // service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`. + // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured + // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`. + // + // +required + Name IBMCloudServiceName `json:"name"` + + // url is a fully qualified URI with scheme https that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" + URL string `json:"url"` +} + +// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type IBMCloudPlatformSpec struct{} + +// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider. +type IBMCloudPlatformStatus struct { + // location is where the cluster has been deployed. + Location string `json:"location,omitempty"` + + // resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + ResourceGroupName string `json:"resourceGroupName,omitempty"` + + // providerType indicates the type of cluster that was created. + ProviderType IBMCloudProviderType `json:"providerType,omitempty"` + + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain. + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` + + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // for the cluster's base domain. + DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` + + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of an IBM Cloud service. These endpoints are consumed by + // components within the cluster to reach the respective IBM Cloud Services. + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` +} + +// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type KubevirtPlatformSpec struct{} + +// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.
+type KubevirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. +// This only includes fields that can be modified in the cluster. +type EquinixMetalPlatformSpec struct{} + +// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider. +type EquinixMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// PowerVSServiceEndpoint stores the configuration of a custom url to +// override existing defaults of PowerVS Services. +type PowerVSServiceEndpoint struct { + // name is the name of the Power VS service. + // A few of the services are: + // IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + // + // +required + // +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC + Name string `json:"name"` + + // url is a fully qualified URI with scheme https that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. +// This only includes fields that can be modified in the cluster. +type PowerVSPlatformSpec struct { + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` +} + +// PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.
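+// A sketch of a populated status (the region, zone and resource group values are hypothetical): +// +//	status := PowerVSPlatformStatus{ +//		Region:        "us-south", +//		Zone:          "us-south", +//		ResourceGroup: "my-resource-group", +//	}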
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceGroup) || has(self.resourceGroup)",message="cannot unset resourceGroup once set" +type PowerVSPlatformStatus struct { + // region holds the default Power VS region for new Power VS resources created by the cluster. + Region string `json:"region"` + + // zone holds the default zone for the new Power VS resources created by the cluster. + // Note: Currently only single-zone OCP clusters are supported. + Zone string `json:"zone"` + + // resourceGroup is the resource group name for new IBMCloud resources created for a cluster. + // The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. + // More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. + // When omitted, the image registry operator won't be able to configure storage, + // which results in the image registry cluster operator not being in an available state. + // + // +kubebuilder:validation:Pattern=^[a-zA-Z0-9-_ ]+$ + // +kubebuilder:validation:MaxLength=40 + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="resourceGroup is immutable once set" + // +optional + ResourceGroup string `json:"resourceGroup"` + + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain. + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` + + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // for the cluster's base domain. + DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` +} + +// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type AlibabaCloudPlatformSpec struct{} + +// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. +type AlibabaCloudPlatformStatus struct { + // region specifies the region for Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` + // +required + Region string `json:"region"` + // resourceGroupID is the ID of the resource group for the cluster. + // +kubebuilder:validation:Pattern=`^(rg-[0-9A-Za-z]+)?$` + // +optional + ResourceGroupID string `json:"resourceGroupID,omitempty"` + // resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:MaxItems=20 + // +listType=map + // +listMapKey=key + // +optional + ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"` +} + +// AlibabaCloudResourceTag is the set of tags to apply to resources. +type AlibabaCloudResourceTag struct { + // key is the key of the tag. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Key string `json:"key"` + // value is the value of the tag. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Value string `json:"value"` +} + +// NutanixPlatformLoadBalancer defines the load balancer used by the cluster on the Nutanix platform.
+// +union +type NutanixPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on the Nutanix platform, + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault, the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged, these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` +} + +// NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. +// This only includes fields that can be modified in the cluster. +type NutanixPlatformSpec struct { + // prismCentral holds the endpoint address and port to access the Nutanix Prism Central. + // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. + // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the + // proxy spec.noProxy list. + // +required + PrismCentral NutanixPrismEndpoint `json:"prismCentral"` + + // prismElements holds one or more endpoint address and port data to access the Nutanix + // Prism Elements (clusters) of the Nutanix Prism Central. Currently, we only support one + // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) + // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) + // spread over multiple Prism Elements (clusters) of the Prism Central. + // +required + // +listType=map + // +listMapKey=name + PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` + + // failureDomains configures failure domain information for the Nutanix platform. + // When set, the failure domains defined here may be used to spread Machines across + // prism element clusters to improve fault tolerance of the cluster. + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 + // +listType=map + // +listMapKey=name + // +optional + FailureDomains []NutanixFailureDomain `json:"failureDomains"` +} + +// NutanixFailureDomain configures failure domain information for the Nutanix platform. +type NutanixFailureDomain struct { + // name defines the unique name of a failure domain. + // Name is required and must be at most 64 characters in length. + // It must consist of only lower case alphanumeric characters and hyphens (-). + // It must start and end with an alphanumeric character. + // This value is arbitrary and is used to identify the failure domain within the platform.
+ // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` + Name string `json:"name"` + + // cluster identifies the cluster (the Prism Element under management of the Prism Central) + // in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained + // from the Prism Central console or using the prism_central API. + // +required + Cluster NutanixResourceIdentifier `json:"cluster"` + + // subnets holds a list of identifiers (one or more) of the cluster's network subnets + // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be + // obtained from the Prism Central console or using the prism_central API. + // If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. + // +required + // +kubebuilder:validation:MinItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 + // +openshift:validation:FeatureGateAwareXValidation:featureGate=NutanixMultiSubnets,rule="self.all(x, self.exists_one(y, x == y))",message="each subnet must be unique" + // +listType=atomic + Subnets []NutanixResourceIdentifier `json:"subnets"` +} + +// NutanixIdentifierType is an enumeration of different resource identifier types. +// +kubebuilder:validation:Enum:=UUID;Name +type NutanixIdentifierType string + +const ( + // NutanixIdentifierUUID is a resource identifier identifying the object by UUID. + NutanixIdentifierUUID NutanixIdentifierType = "UUID" + + // NutanixIdentifierName is a resource identifier identifying the object by Name. + NutanixIdentifierName NutanixIdentifierType = "Name" +) + +// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'UUID' ? has(self.uuid) : !has(self.uuid)",message="uuid configuration is required when type is UUID, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Name' ? has(self.name) : !has(self.name)",message="name configuration is required when type is Name, and forbidden otherwise" +// +union +type NutanixResourceIdentifier struct { + // type is the identifier type to use for this resource. + // +unionDiscriminator + // +required + Type NutanixIdentifierType `json:"type"` + + // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + // +optional + UUID *string `json:"uuid,omitempty"` + + // name is the resource name in the PC. It cannot be empty if the type is Name.
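+ // For example, a Name-typed identifier can be constructed as follows (the cluster name is hypothetical): + // + //	name := "pe-cluster-1" + //	id := NutanixResourceIdentifier{Type: NutanixIdentifierName, Name: &name}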
+ // +optional + Name *string `json:"name,omitempty"` +} + +// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster). +type NutanixPrismEndpoint struct { + // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster). + // +required + // +kubebuilder:validation:MaxLength=256 + Address string `json:"address"` + + // port is the port number to access the Nutanix Prism Central or Element (cluster). + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int32 `json:"port"` +} + +// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster). +type NutanixPrismElementEndpoint struct { + // name is the name of the Prism Element (cluster). This value will correspond with + // the cluster field configured on other resources (e.g. Machines, PVCs, etc.). + // +required + // +kubebuilder:validation:MaxLength=256 + Name string `json:"name"` + + // endpoint holds the endpoint address and port data of the Prism Element (cluster). + // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. + // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the + // proxy spec.noProxy list. + // +required + Endpoint NutanixPrismEndpoint `json:"endpoint"` +} + +// NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider. +type NutanixPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + // + // Deprecated: Use APIServerInternalIPs instead. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // apiServerInternalIPs are the IP addresses to contact the Kubernetes API + // server that can be used by components inside the cluster, like kubelets + // using the infrastructure rather than Kubernetes networking. These are the + // IPs for a self-hosted load balancer in front of the API servers. In dual + // stack clusters this list contains two IPs otherwise only one. + // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=set + APIServerInternalIPs []string `json:"apiServerInternalIPs"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + // + // Deprecated: Use IngressIPs instead. + IngressIP string `json:"ingressIP,omitempty"` + + // ingressIPs are the external IPs which route to the default ingress + // controller. The IPs are suitable targets of a wildcard DNS record used to + // resolve default route host names. In dual stack clusters this list + // contains two IPs otherwise only one.
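+ // For example, a dual-stack pair using documentation addresses (illustrative only): + // + //	IngressIPs: []string{"192.0.2.10", "2001:db8::10"},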
+ // + // +kubebuilder:validation:Format=ip + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=set + IngressIPs []string `json:"ingressIPs"` + + // loadBalancer defines how the load balancer used by the cluster is configured. + // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +openshift:enable:FeatureGate=BareMetalLoadBalancer + // +optional + LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InfrastructureList is a list of Infrastructure resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type InfrastructureList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Infrastructure `json:"items"` +} + +// IP is an IP address (for example, "10.0.0.0" or "fd00::"). +// +kubebuilder:validation:XValidation:rule="isIP(self)",message="value must be a valid IP address" +// +kubebuilder:validation:MaxLength:=39 +// +kubebuilder:validation:MinLength:=1 +type IP string + +// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). +// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address" +// +kubebuilder:validation:MaxLength:=43 +// +kubebuilder:validation:MinLength:=1 +type CIDR string diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go new file mode 100644 index 0000000000000..9492e08a72ccb --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -0,0 +1,332 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress holds cluster-wide information about ingress, including the default ingress domain +// used for routes. The canonical name is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=ingresses,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Ingress struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec IngressSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden.
+ // +optional + Status IngressStatus `json:"status"` +} + +type IngressSpec struct { + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "<route-name>.<route-namespace>.<domain>". + // + // It is also used as the default wildcard domain suffix for ingress. The + // default ingresscontroller domain will follow this pattern: "*.<domain>". + // + // Once set, changing domain is not currently supported. + Domain string `json:"domain"` + + // appsDomain is an optional domain to use instead of the one specified + // in the domain field when a Route is created without specifying an explicit + // host. If appsDomain is nonempty, this value is used to generate default + // host values for Route. Unlike domain, appsDomain may be modified after + // installation. + // This assumes a new ingresscontroller has been set up with a wildcard + // certificate. + // +optional + AppsDomain string `json:"appsDomain,omitempty"` + + // componentRoutes is an optional list of routes that are managed by OpenShift components + // for which a cluster-admin is able to configure the hostname and serving certificate. + // The namespace and name of each route in this list should match an existing entry in the + // status.componentRoutes list. + // + // To determine the set of configurable Routes, look at namespace and name of entries in the + // .status.componentRoutes list, where participating operators write the status of + // configurable routes. + // +optional + // +listType=map + // +listMapKey=namespace + // +listMapKey=name + ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"` + + // requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes + // matching the domainPattern/s and namespaceSelector/s that are specified in the policy. + // Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route + // annotation, and affect route admission. + // + // A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: + // "haproxy.router.openshift.io/hsts_header" + // E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains + // + // - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, + // then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route + // is rejected. + // - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies + // determines the route's admission status. + // - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, + // then it may use any HSTS Policy annotation. + // + // The HSTS policy configuration may be changed after routes have already been created. An update to a previously + // admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. + // However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. + // + // Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.
+ // +optional + RequiredHSTSPolicies []RequiredHSTSPolicy `json:"requiredHSTSPolicies,omitempty"` + + // loadBalancer contains load balancer details which are not specific to the underlying infrastructure + // provider of the current cluster and which are required for the Ingress Controller to work on OpenShift. + // +optional + LoadBalancer LoadBalancer `json:"loadBalancer,omitempty"` +} + +// IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider +// of the current cluster. Since these are used at spec-level for the underlying cluster, it +// is expected that only one of the spec structs is set. +// +union +type IngressPlatformSpec struct { + // type is the underlying infrastructure provider for the cluster. + // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", + // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", + // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, + // and must handle unrecognized platforms as None if they do not support that platform. + // + // +unionDiscriminator + Type PlatformType `json:"type"` + + // aws contains settings specific to the Amazon Web Services infrastructure provider. + // +optional + AWS *AWSIngressSpec `json:"aws,omitempty"` +} + +type LoadBalancer struct { + // platform holds configuration specific to the underlying + // infrastructure provider for the ingress load balancers. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // +optional + Platform IngressPlatformSpec `json:"platform,omitempty"` +} + +// AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider. +// This only includes fields that can be modified in the cluster. +// +union +type AWSIngressSpec struct { + // type allows user to set a load balancer type. + // When this field is set, the default ingresscontroller will get created using the specified LBType. + // If this field is not set, the default ingress controller of LBType Classic will be created. + // Valid values are: + // + // * "Classic": A Classic Load Balancer that makes routing decisions at either + // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See + // the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + // + // * "NLB": A Network Load Balancer that makes routing decisions at the + // transport layer (TCP/SSL). See the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + // +unionDiscriminator + // +kubebuilder:validation:Enum:=NLB;Classic + // +required + Type AWSLBType `json:"type,omitempty"` +} + +type AWSLBType string + +const ( + // NLB is the Network Load Balancer Type of AWS. Using NLB one can set NLB load balancer type for the default ingress controller. + NLB AWSLBType = "NLB" + + // Classic is the Classic Load Balancer Type of AWS. Using Classic one can set Classic load balancer type for the default ingress controller. + Classic AWSLBType = "Classic" +) + +// ConsumingUser is an alias for string to which we add validation. Currently only service accounts are supported.
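+// A value matching the service-account pattern below looks like the following (the namespace and name are hypothetical): +// +//	ConsumingUser("system:serviceaccount:openshift-console:console")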
+// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=512 +type ConsumingUser string + +// Hostname is a host name as defined by RFC-1123. +// + --- +// + The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it +// + allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric +// + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. +// + ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ +// + +// + The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, +// + except that it allows hostnames longer than the maximum length: +// + ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ +// + +// + Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname +// + was saved via validation by the incorrect left operand of the | operator. +// + +// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` +type Hostname string + +type IngressStatus struct { + // componentRoutes is where participating operators place the current route status for routes whose + // hostnames and serving certificates can be customized by the cluster-admin. + // +optional + // +listType=map + // +listMapKey=namespace + // +listMapKey=name + ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` + + // defaultPlacement is set at installation time to control which + // nodes will host the ingress router pods by default. The options are + // control-plane nodes or worker nodes. + // + // This field works by dictating how the Cluster Ingress Operator will + // consider unset replicas and nodePlacement fields in IngressController + // resources when creating the corresponding Deployments. + // + // See the documentation for the IngressController replicas and nodePlacement + // fields for more information. + // + // When omitted, the default value is Workers + // + // +kubebuilder:validation:Enum:="ControlPlane";"Workers";"" + // +optional + DefaultPlacement DefaultPlacement `json:"defaultPlacement"` +} + +// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +type ComponentRouteSpec struct { + // namespace is the namespace of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. 
+ // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Name string `json:"name"` + + // hostname is the hostname that should be used by the route. + // +required + Hostname Hostname `json:"hostname"` + + // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. + // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` +} + +// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +type ComponentRouteStatus struct { + // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace + // ensures that no two components will conflict and the same component can be installed multiple times. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. It does not have to be the actual name of a route resource + // but it cannot be renamed. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Name string `json:"name"` + + // defaultHostname is the hostname of this route prior to customization. + // +required + DefaultHostname Hostname `json:"defaultHostname"` + + // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + // +kubebuilder:validation:MaxItems=5 + // +optional + ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"` + + // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single + // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + // +kubebuilder:validation:MinItems=1 + // +optional + CurrentHostnames []Hostname `json:"currentHostnames,omitempty"` + + // conditions are used to communicate the state of the componentRoutes entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If Available is true, the content served by the route can be accessed by users. This includes cases + // where a default may continue to serve content while the customized route specified by the cluster-admin + // is being configured. + // + // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. + // The currentHostnames field may or may not be in effect. + // + // If Progressing is true, that means the component is taking some action related to the componentRoutes entry.
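+ // A populated Available condition might look like the following sketch (the reason and message are hypothetical): + // + //	metav1.Condition{ + //		Type:    "Available", + //		Status:  metav1.ConditionTrue, + //		Reason:  "AsExpected", + //		Message: "route is serving traffic", + //	}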
+ // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + // +kubebuilder:validation:MinItems=1 + // +required + RelatedObjects []ObjectReference `json:"relatedObjects"` +} + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type IngressList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Ingress `json:"items"` +} + +// DefaultPlacement defines the default placement of ingress router pods. +type DefaultPlacement string + +const ( + // "Workers" is for having router pods placed on worker nodes by default. + DefaultPlacementWorkers DefaultPlacement = "Workers" + + // "ControlPlane" is for having router pods placed on control-plane nodes by default. + DefaultPlacementControlPlane DefaultPlacement = "ControlPlane" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go new file mode 100644 index 0000000000000..95e55a7ffc0b1 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -0,0 +1,305 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. +// Please view network.spec for an explanation on what applies when configuring this resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:compatibility-gen:level=1 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=networks,scope=Cluster +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Network struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration. + // As a general rule, this SHOULD NOT be read directly. Instead, you should + // consume the NetworkStatus, as it indicates the currently deployed configuration. + // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + // +required + Spec NetworkSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status NetworkStatus `json:"status"` +} + +// NetworkSpec is the desired network configuration. 
+// As a general rule, this SHOULD NOT be read directly. Instead, you should +// consume the NetworkStatus, as it indicates the currently deployed configuration. +// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" +type NetworkSpec struct { + // IP address pool to use for pod IPs. + // This field is immutable after installation. + // +listType=atomic + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` + + // IP address pool for services. + // Currently, we only support a single entry here. + // This field is immutable after installation. + // +listType=atomic + ServiceNetwork []string `json:"serviceNetwork"` + + // networkType is the plugin that is to be deployed (e.g. OVNKubernetes). + // This should match a value that the cluster-network-operator understands, + // or else no networking will be installed. + // Currently supported values are: + // - OVNKubernetes + // This field is immutable after installation. + NetworkType string `json:"networkType"` + + // externalIP defines configuration for controllers that + // affect Service.ExternalIP. If nil, then ExternalIP is + // not allowed to be set. + // +optional + ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"` + + // The port range allowed for Services of type NodePort. + // If not specified, the default of 30000-32767 will be used. + // Such Services without a NodePort specified will have one + // automatically allocated from this range. + // This parameter can be updated after the cluster is + // installed. + // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` + ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` + + // networkDiagnostics defines network diagnostics configuration. + // + // Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. + // If networkDiagnostics is not specified or is empty, + // and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, + // the network diagnostics feature will be disabled. + // + // +optional + // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig + NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"` +} + +// NetworkStatus is the current network configuration. +type NetworkStatus struct { + // IP address pool to use for pod IPs. + // +listType=atomic + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` + + // IP address pool for services. + // Currently, we only support a single entry here. + // +listType=atomic + ServiceNetwork []string `json:"serviceNetwork,omitempty"` + + // networkType is the plugin that is deployed (e.g. OVNKubernetes). + NetworkType string `json:"networkType,omitempty"` + + // clusterNetworkMTU is the MTU for inter-pod networking. + ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` + + // migration contains the cluster network migration configuration. 
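+ // For example, an in-progress MTU migration might be reported as follows (a sketch; the MTU value is hypothetical): + // + //	to := uint32(9000) + //	status.Migration = &NetworkMigration{MTU: &MTUMigration{Network: &MTUMigrationValues{To: &to}}}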
+
+// NetworkStatus is the current network configuration.
+type NetworkStatus struct {
+	// IP address pool to use for pod IPs.
+	// +listType=atomic
+	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+	// IP address pool for services.
+	// Currently, we only support a single entry here.
+	// +listType=atomic
+	ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+
+	// networkType is the plugin that is deployed (e.g. OVNKubernetes).
+	NetworkType string `json:"networkType,omitempty"`
+
+	// clusterNetworkMTU is the MTU for inter-pod networking.
+	ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
+
+	// migration contains the cluster network migration configuration.
+	Migration *NetworkMigration `json:"migration,omitempty"`
+
+	// conditions represents the observations of a network.config current state.
+	// Known .status.conditions.type are: "NetworkDiagnosticsAvailable"
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	// +openshift:enable:FeatureGate=NetworkDiagnosticsConfig
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
+// are allocated.
+type ClusterNetworkEntry struct {
+	// The complete block for pod IPs.
+	CIDR string `json:"cidr"`
+
+	// The size (prefix) of block to allocate to each node. If this
+	// field is not used by the plugin, it can be left unset.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
+// of a Service resource.
+type ExternalIPConfig struct {
+	// policy is a set of restrictions applied to the ExternalIP field.
+	// If nil or empty, then ExternalIP is not allowed to be set.
+	// +optional
+	Policy *ExternalIPPolicy `json:"policy,omitempty"`
+
+	// autoAssignCIDRs is a list of CIDRs from which to automatically assign
+	// Service.ExternalIP. These are assigned when the service is of type
+	// LoadBalancer. In general, this is only useful for bare-metal clusters.
+	// In OpenShift 3.x, this was misleadingly called "IngressIPs".
+	// Automatically assigned External IPs are not affected by any
+	// ExternalIPPolicy rules.
+	// Currently, only one entry may be provided.
+	// +optional
+	// +listType=atomic
+	AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
+
+// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
+// field in a Service. If the zero struct is supplied, then none are permitted.
+// The policy controller always allows automatically assigned external IPs.
+type ExternalIPPolicy struct {
+	// allowedCIDRs is the list of allowed CIDRs.
+	// +listType=atomic
+	AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+
+	// rejectedCIDRs is the list of disallowed CIDRs. These take precedence
+	// over allowedCIDRs.
+	// +optional
+	// +listType=atomic
+	RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type NetworkList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Network `json:"items"`
+}
+
+// NetworkMigration represents the network migration status.
+type NetworkMigration struct {
+	// networkType is the target plugin that is being deployed.
+	// DEPRECATED: network type migration is no longer supported,
+	// so this should always be unset.
+	// +optional
+	NetworkType string `json:"networkType,omitempty"`
+
+	// mtu is the MTU configuration that is being deployed.
+	// +optional
+	MTU *MTUMigration `json:"mtu,omitempty"`
+}
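The allow/reject semantics documented on the ExternalIPPolicy type above (rejectedCIDRs take precedence over allowedCIDRs) can be made concrete with a small sketch. This is illustrative only, not the actual policy controller:

package main

import (
	"fmt"
	"net"
)

// externalIPAllowed mirrors the documented precedence: an IP inside any
// rejected CIDR is denied even if it also falls inside an allowed CIDR.
func externalIPAllowed(ip net.IP, allowed, rejected []string) bool {
	contains := func(cidrs []string) bool {
		for _, c := range cidrs {
			if _, n, err := net.ParseCIDR(c); err == nil && n.Contains(ip) {
				return true
			}
		}
		return false
	}
	if contains(rejected) { // rejections win
		return false
	}
	return contains(allowed)
}

func main() {
	ip := net.ParseIP("192.0.2.10")
	// Allowed by /24 but rejected by the narrower /28, so the result is false.
	fmt.Println(externalIPAllowed(ip, []string{"192.0.2.0/24"}, []string{"192.0.2.0/28"}))
}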
+
+// MTUMigration contains information about MTU migration.
+type MTUMigration struct {
+	// network contains MTU migration configuration for the default network.
+	// +optional
+	Network *MTUMigrationValues `json:"network,omitempty"`
+
+	// machine contains MTU migration configuration for the machine's uplink.
+	// +optional
+	Machine *MTUMigrationValues `json:"machine,omitempty"`
+}
+
+// MTUMigrationValues contains the values for an MTU migration.
+type MTUMigrationValues struct {
+	// to is the MTU to migrate to.
+	// +kubebuilder:validation:Minimum=0
+	To *uint32 `json:"to"`
+
+	// from is the MTU to migrate from.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	From *uint32 `json:"from,omitempty"`
+}
+
+// NetworkDiagnosticsMode is an enumeration of the available network diagnostics modes.
+// Valid values are "", "All", "Disabled".
+// +kubebuilder:validation:Enum:="";All;Disabled
+type NetworkDiagnosticsMode string
+
+const (
+	// NetworkDiagnosticsNoOpinion means that the user has no opinion and the platform is left
+	// to choose a reasonable default. The current default is All and is subject to change over time.
+	NetworkDiagnosticsNoOpinion NetworkDiagnosticsMode = ""
+	// NetworkDiagnosticsAll means that all network diagnostics checks are enabled
+	NetworkDiagnosticsAll NetworkDiagnosticsMode = "All"
+	// NetworkDiagnosticsDisabled means that network diagnostics is disabled
+	NetworkDiagnosticsDisabled NetworkDiagnosticsMode = "Disabled"
+)
+
+// NetworkDiagnostics defines network diagnostics configuration.
+type NetworkDiagnostics struct {
+	// mode controls the network diagnostics mode
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is All.
+	//
+	// +optional
+	Mode NetworkDiagnosticsMode `json:"mode"`
+
+	// sourcePlacement controls the scheduling of the network diagnostics source deployment
+	//
+	// See NetworkDiagnosticsSourcePlacement for more details about default values.
+	//
+	// +optional
+	SourcePlacement NetworkDiagnosticsSourcePlacement `json:"sourcePlacement"`
+
+	// targetPlacement controls the scheduling of the network diagnostics target daemonset
+	//
+	// See NetworkDiagnosticsTargetPlacement for more details about default values.
+	//
+	// +optional
+	TargetPlacement NetworkDiagnosticsTargetPlacement `json:"targetPlacement"`
+}
+
+// NetworkDiagnosticsSourcePlacement defines node scheduling configuration for the network diagnostics source components
+type NetworkDiagnosticsSourcePlacement struct {
+	// nodeSelector is the node selector applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `kubernetes.io/os: linux`.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector"`
+
+	// tolerations is a list of tolerations applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is an empty list.
+	//
+	// +optional
+	// +listType=atomic
+	Tolerations []corev1.Toleration `json:"tolerations"`
+}
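The FeatureGateAwareXValidation rule quoted on NetworkSpec forbids setting sourcePlacement or targetPlacement while mode is Disabled. A hedged Go sketch of the same check, using trimmed stand-in types rather than the vendored ones:

package main

import "fmt"

// Stand-ins for the vendored types, trimmed to what the rule inspects.
type placement struct {
	nodeSelector map[string]string
}

type networkDiagnostics struct {
	mode            string
	sourcePlacement placement
	targetPlacement placement
}

// validate mirrors the CEL rule: placements may not be set while mode is Disabled.
func validate(d networkDiagnostics) error {
	placementsSet := len(d.sourcePlacement.nodeSelector) > 0 || len(d.targetPlacement.nodeSelector) > 0
	if d.mode == "Disabled" && placementsSet {
		return fmt.Errorf("cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled")
	}
	return nil
}

func main() {
	bad := networkDiagnostics{
		mode:            "Disabled",
		sourcePlacement: placement{nodeSelector: map[string]string{"kubernetes.io/os": "linux"}},
	}
	fmt.Println(validate(bad)) // rejected, matching the API server's message
}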
+
+// NetworkDiagnosticsTargetPlacement defines node scheduling configuration for the network diagnostics target components
+type NetworkDiagnosticsTargetPlacement struct {
+	// nodeSelector is the node selector applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `kubernetes.io/os: linux`.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector"`
+
+	// tolerations is a list of tolerations applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `- operator: "Exists"` which means that all taints are tolerated.
+	//
+	// +optional
+	// +listType=atomic
+	Tolerations []corev1.Toleration `json:"tolerations"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go
new file mode 100644
index 0000000000000..3fc7bc0c39a1b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_node.go
@@ -0,0 +1,144 @@
+package v1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node holds cluster-wide information about node specific features.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1107
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=nodes,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
+type Node struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec NodeSpec `json:"spec"`
+
+	// status holds observed values.
+	// +optional
+	Status NodeStatus `json:"status"`
+}
+
+type NodeSpec struct {
+	// cgroupMode determines the cgroups version on the node
+	// +optional
+	CgroupMode CgroupMode `json:"cgroupMode,omitempty"`
+
+	// workerLatencyProfile determines how fast the kubelet updates
+	// its status and the corresponding reaction time of the cluster
+	// +optional
+	WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"`
+
+	// minimumKubeletVersion is the lowest version of a kubelet that can join the cluster.
+	// Specifically, the apiserver will deny most authorization requests of kubelets that are older
+	// than the specified version, only allowing the kubelet to get and update its node object, and perform
+	// subjectaccessreviews.
+	// This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads,
+	// and will eventually be marked as not ready.
+	// Its max length is 8, so the maximum version allowed is either "9.999.99" or "99.99.99".
+	// Since the kubelet reports the version of the Kubernetes release, not OpenShift, this field references
+	// the underlying Kubernetes version this version of OpenShift is based off of.
+	// In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then
+	// they should set the minimumKubeletVersion to 1.30.0.
+	// When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version.
+	// Thus, a kubelet with version "1.0.0-ec.0" will be compatible with minimumKubeletVersion "1.0.0" or earlier.
+	// +kubebuilder:validation:XValidation:rule="self == \"\" || self.matches('^[0-9]*.[0-9]*.[0-9]*$')",message="minimumKubeletVersion must be in a semver compatible format of x.y.z, or empty"
+	// +kubebuilder:validation:MaxLength:=8
+	// +openshift:enable:FeatureGate=MinimumKubeletVersion
+	// +optional
+	MinimumKubeletVersion string `json:"minimumKubeletVersion"`
+}
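The minimumKubeletVersion comment above describes stripping anything beyond major.minor.patch before comparing. An illustrative sketch of that comparison, assuming hypothetical helper names (this is not the apiserver's actual implementation):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseXYZ reduces a version string to its major.minor.patch triple,
// dropping pre-release content such as "-ec.0".
func parseXYZ(v string) ([3]int, error) {
	v = strings.SplitN(v, "-", 2)[0]
	var out [3]int
	parts := strings.Split(v, ".")
	if len(parts) != 3 {
		return out, fmt.Errorf("not x.y.z: %q", v)
	}
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return out, err
		}
		out[i] = n
	}
	return out, nil
}

// atLeast reports whether the kubelet-reported version satisfies the minimum.
func atLeast(kubelet, minimum string) (bool, error) {
	k, err := parseXYZ(kubelet)
	if err != nil {
		return false, err
	}
	m, err := parseXYZ(minimum)
	if err != nil {
		return false, err
	}
	for i := range k {
		if k[i] != m[i] {
			return k[i] > m[i], nil
		}
	}
	return true, nil // equal versions are allowed
}

func main() {
	ok, _ := atLeast("1.0.0-ec.0", "1.0.0")
	fmt.Println(ok) // true, matching the comment's example
}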
+
+type NodeStatus struct {
+	// conditions contain the details and the current state of the nodes.config object
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +kubebuilder:validation:Enum=v1;v2;""
+type CgroupMode string
+
+const (
+	CgroupModeEmpty   CgroupMode = "" // Empty string indicates to honor the user-set value on the system, which should not be overridden by OpenShift
+	CgroupModeV1      CgroupMode = "v1"
+	CgroupModeV2      CgroupMode = "v2"
+	CgroupModeDefault CgroupMode = CgroupModeV1
+)
+
+// +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction
+type WorkerLatencyProfileType string
+
+const (
+	// Medium Kubelet Update Frequency (heart-beat) and Average Reaction Time to unresponsive Node
+	MediumUpdateAverageReaction WorkerLatencyProfileType = "MediumUpdateAverageReaction"
+
+	// Low Kubelet Update Frequency (heart-beat) and Slow Reaction Time to unresponsive Node
+	LowUpdateSlowReaction WorkerLatencyProfileType = "LowUpdateSlowReaction"
+
+	// Default values of the relevant Kubelet, Kube Controller Manager and Kube API Server
+	DefaultUpdateDefaultReaction WorkerLatencyProfileType = "Default"
+)
+
+const (
+	// DefaultNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultNodeStatusUpdateFrequency = 10 * time.Second
+	// DefaultNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultNodeMonitorGracePeriod = 40 * time.Second
+	// DefaultNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultNotReadyTolerationSeconds = 300
+	// DefaultUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+	DefaultUnreachableTolerationSeconds = 300
+
+	// MediumNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+	MediumNodeStatusUpdateFrequency = 20 * time.Second
+	// MediumNodeMonitorGracePeriod refers to the
"--node-monitor-grace-period" of the Kube Controller Manager in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNodeMonitorGracePeriod = 2 * time.Minute + // MediumNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNotReadyTolerationSeconds = 60 + // MediumUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumUnreachableTolerationSeconds = 60 + + // LowNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeStatusUpdateFrequency = 1 * time.Minute + // LowNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeMonitorGracePeriod = 5 * time.Minute + // LowNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNotReadyTolerationSeconds = 60 + // LowUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowUnreachableTolerationSeconds = 60 +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NodeList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Node `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go new file mode 100644 index 0000000000000..20845e4dbe93a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -0,0 +1,597 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// OAuth Server and Identity Provider Config + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. +// It is used to configure the integrated OAuth server. +// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=oauths,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type OAuth struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+	// spec holds user settable values for configuration
+	// +required
+	Spec OAuthSpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status OAuthStatus `json:"status"`
+}
+
+// OAuthSpec contains the desired cluster auth configuration
+type OAuthSpec struct {
+	// identityProviders is an ordered list of ways for a user to identify themselves.
+	// When this list is empty, no identities are provisioned for users.
+	// +optional
+	// +listType=atomic
+	IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
+
+	// tokenConfig contains options for authorization and access tokens
+	TokenConfig TokenConfig `json:"tokenConfig"`
+
+	// templates allow you to customize pages like the login page.
+	// +optional
+	Templates OAuthTemplates `json:"templates"`
+}
+
+// OAuthStatus shows the current known state of the OAuth server in the cluster
+type OAuthStatus struct {
+	// TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+	// accessTokenMaxAgeSeconds defines the maximum age of access tokens
+	AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+
+	// accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
+	// +optional
+	AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+
+	// accessTokenInactivityTimeout defines the token inactivity timeout
+	// for tokens granted by any client.
+	// The value represents the maximum amount of time that can occur between
+	// consecutive uses of the token. Tokens become invalid if they are not
+	// used within this temporal window. The user will need to acquire a new
+	// token to regain access once a token times out. Takes a valid time
+	// duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
+	// value for the duration is 300s (5 minutes). If the timeout is configured
+	// per client, then that value takes precedence. If the timeout value is
+	// not specified and the client does not override the value, then tokens
+	// remain valid for their full lifetime.
+	//
+	// WARNING: existing tokens' timeout will not be affected (lowered) by changing this value
+	// +optional
+	AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+const (
+	// LoginTemplateKey is the key of the login template in a secret
+	LoginTemplateKey = "login.html"
+
+	// ProviderSelectionTemplateKey is the key for the provider selection template in a secret
+	ProviderSelectionTemplateKey = "providers.html"
+
+	// ErrorsTemplateKey is the key for the errors template in a secret
+	ErrorsTemplateKey = "errors.html"
+
+	// BindPasswordKey is the key for the LDAP bind password in a secret
+	BindPasswordKey = "bindPassword"
+
+	// ClientSecretKey is the key for the oauth client secret data in a secret
+	ClientSecretKey = "clientSecret"
+
+	// HTPasswdDataKey is the key for the htpasswd file data in a secret
+	HTPasswdDataKey = "htpasswd"
+)
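accessTokenInactivityTimeout accepts duration strings such as "5m" and enforces a 300s floor. A sketch of that validation, with a hypothetical helper name (this is not the actual OAuth server code):

package main

import (
	"fmt"
	"time"
)

// checkInactivityTimeout enforces the documented 300s (5 minute) minimum on
// a duration string such as "5m", "1.5h" or "2h45m".
func checkInactivityTimeout(s string) error {
	d, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	if d < 300*time.Second {
		return fmt.Errorf("accessTokenInactivityTimeout %s is below the 300s minimum", d)
	}
	return nil
}

func main() {
	fmt.Println(checkInactivityTimeout("5m"))  // <nil>
	fmt.Println(checkInactivityTimeout("90s")) // error
}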
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+	// login is the name of a secret that specifies a go template to use to render the login page.
+	// The key "login.html" is used to locate the template data.
+	// If specified and the secret or expected key is not found, the default login page is used.
+	// If the specified template is not valid, the default login page is used.
+	// If unspecified, the default login page is used.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	Login SecretNameReference `json:"login"`
+
+	// providerSelection is the name of a secret that specifies a go template to use to render
+	// the provider selection page.
+	// The key "providers.html" is used to locate the template data.
+	// If specified and the secret or expected key is not found, the default provider selection page is used.
+	// If the specified template is not valid, the default provider selection page is used.
+	// If unspecified, the default provider selection page is used.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	ProviderSelection SecretNameReference `json:"providerSelection"`
+
+	// error is the name of a secret that specifies a go template to use to render error pages
+	// during the authentication or grant flow.
+	// The key "errors.html" is used to locate the template data.
+	// If specified and the secret or expected key is not found, the default error page is used.
+	// If the specified template is not valid, the default error page is used.
+	// If unspecified, the default error page is used.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	Error SecretNameReference `json:"error"`
+}
+
+// IdentityProvider provides identities for users authenticating using credentials
+type IdentityProvider struct {
+	// name is used to qualify the identities returned by this provider.
+	// - It MUST be unique and not shared by any other identity provider used
+	// - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":"
+	// Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName
+	Name string `json:"name"`
+
+	// mappingMethod determines how identities from this provider are mapped to users.
+	// Defaults to "claim".
+	// +optional
+	MappingMethod MappingMethodType `json:"mappingMethod,omitempty"`
+
+	IdentityProviderConfig `json:",inline"`
+}
+
+// MappingMethodType specifies how new identities should be mapped to users when they log in
+type MappingMethodType string
+
+const (
+	// MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user
+	// with that user name is already mapped to another identity.
+	// Default.
+	MappingMethodClaim MappingMethodType = "claim"
+
+	// MappingMethodLookup looks up existing users already mapped to an identity but does not
+	// automatically provision users or identities. Requires identities and users be set up
+	// manually or using an external process.
+	MappingMethodLookup MappingMethodType = "lookup"
+
+	// MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with
+	// that user name already exists, the identity is mapped to the existing user, adding to any
+	// existing identity mappings for the user.
+	MappingMethodAdd MappingMethodType = "add"
+)
+
+type IdentityProviderType string
+
+const (
+	// IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
+	IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
+
+	// IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
+	IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
+
+	// IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
+	IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
+
+	// IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
+	IdentityProviderTypeGoogle IdentityProviderType = "Google"
+
+	// IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
+	IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
+
+	// IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
+	IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
+
+	// IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
+	IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
+
+	// IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
+	IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
+
+	// IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
+	IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
+)
+
+// IdentityProviderConfig contains configuration for using a specific identity provider
+type IdentityProviderConfig struct {
+	// type identifies the identity provider type for this entry.
+	Type IdentityProviderType `json:"type"`
+
+	// Provider-specific configuration
+	// The json tag MUST match the `Type` specified above, case-insensitively.
+	// e.g. for `Type: "LDAP"`, the `ldap` configuration should be provided.
+
+	// basicAuth contains configuration options for the BasicAuth IdP
+	// +optional
+	BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
+
+	// github enables user authentication using GitHub credentials
+	// +optional
+	GitHub *GitHubIdentityProvider `json:"github,omitempty"`
+
+	// gitlab enables user authentication using GitLab credentials
+	// +optional
+	GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
+
+	// google enables user authentication using Google credentials
+	// +optional
+	Google *GoogleIdentityProvider `json:"google,omitempty"`
+
+	// htpasswd enables user authentication using an HTPasswd file to validate credentials
+	// +optional
+	HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
+
+	// keystone enables user authentication using keystone password credentials
+	// +optional
+	Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
+
+	// ldap enables user authentication using LDAP credentials
+	// +optional
+	LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
+
+	// openID enables user authentication using OpenID credentials
+	// +optional
+	OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
+
+	// requestHeader enables user authentication using request header credentials
+	// +optional
+	RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
+}
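IdentityProviderConfig is a discriminated union: the member whose json tag matches Type (case-insensitively) is the one that should be set. A sketch of how a consumer might dispatch on it, using simplified stand-in types rather than the vendored ones:

package main

import "fmt"

// Simplified stand-ins for the union members; the real types carry many more fields.
type basicAuth struct{ URL string }
type htpasswd struct{ FileData string }

type idpConfig struct {
	Type      string
	BasicAuth *basicAuth
	HTPasswd  *htpasswd
}

// activeMember reports which union member backs the declared Type.
func activeMember(c idpConfig) (string, error) {
	switch c.Type {
	case "BasicAuth":
		if c.BasicAuth == nil {
			return "", fmt.Errorf("type is BasicAuth but basicAuth is unset")
		}
		return "basicAuth", nil
	case "HTPasswd":
		if c.HTPasswd == nil {
			return "", fmt.Errorf("type is HTPasswd but htpasswd is unset")
		}
		return "htpasswd", nil
	default:
		return "", fmt.Errorf("unsupported identity provider type %q", c.Type)
	}
}

func main() {
	fmt.Println(activeMember(idpConfig{Type: "HTPasswd", HTPasswd: &htpasswd{FileData: "htpasswd"}}))
}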
+
+// BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+type BasicAuthIdentityProvider struct {
+	// OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
+	OAuthRemoteConnectionInfo `json:",inline"`
+}
+
+// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
+type OAuthRemoteConnectionInfo struct {
+	// url is the remote URL to connect to
+	URL string `json:"url"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+
+	// tlsClientCert is an optional reference to a secret by name that contains the
+	// PEM-encoded TLS client certificate to present when connecting to the server.
+	// The key "tls.crt" is used to locate the data.
+	// If specified and the secret or expected key is not found, the identity provider is not honored.
+	// If the specified certificate data is not valid, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	TLSClientCert SecretNameReference `json:"tlsClientCert"`
+
+	// tlsClientKey is an optional reference to a secret by name that contains the
+	// PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
+	// The key "tls.key" is used to locate the data.
+	// If specified and the secret or expected key is not found, the identity provider is not honored.
+	// If the specified certificate data is not valid, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	TLSClientKey SecretNameReference `json:"tlsClientKey"`
+}
+
+// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
+type HTPasswdIdentityProvider struct {
+	// fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
+	// The key "htpasswd" is used to locate the data.
+	// If the secret or expected key is not found, the identity provider is not honored.
+	// If the specified htpasswd data is not valid, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	FileData SecretNameReference `json:"fileData"`
+}
+
+// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
+type LDAPIdentityProvider struct {
+	// url is an RFC 2255 URL which specifies the LDAP search parameters to use.
+	// The syntax of the URL is:
+	//   ldap://host:port/basedn?attribute?scope?filter
+	URL string `json:"url"`
+
+	// bindDN is an optional DN to bind with during the search phase.
+	// +optional
+	BindDN string `json:"bindDN"`
+
+	// bindPassword is an optional reference to a secret by name
+	// containing a password to bind with during the search phase.
+	// The key "bindPassword" is used to locate the data.
+	// If specified and the secret or expected key is not found, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	// +optional
+	BindPassword SecretNameReference `json:"bindPassword"`
+
+	// insecure, if true, indicates the connection should not use TLS.
+	// WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
+	// attempt to connect using TLS, even when `insecure` is set to `true`.
+	// When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
+	// a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
+	Insecure bool `json:"insecure"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+
+	// attributes maps LDAP attributes to identities
+	Attributes LDAPAttributeMapping `json:"attributes"`
+}
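The LDAP url field follows the RFC 2255 shape ldap://host:port/basedn?attribute?scope?filter. An illustrative sketch of splitting such a URL (not the library OpenShift actually uses for this; the helper name is hypothetical):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// splitLDAPURL separates an RFC 2255 style LDAP URL into its components.
// url.Parse keeps everything after the first "?" in RawQuery, so the
// remaining "?"-separated fields can be split out from there.
func splitLDAPURL(raw string) (host, baseDN, attr, scope, filter string, err error) {
	u, err := url.Parse(raw)
	if err != nil {
		return
	}
	host = u.Host
	baseDN = strings.TrimPrefix(u.Path, "/")
	parts := strings.SplitN(u.RawQuery, "?", 3)
	for i, p := range parts {
		switch i {
		case 0:
			attr = p
		case 1:
			scope = p
		case 2:
			filter = p
		}
	}
	return
}

func main() {
	fmt.Println(splitLDAPURL("ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid?sub?(objectClass=person)"))
}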
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+	// id is the list of attributes whose values should be used as the user ID. Required.
+	// The first non-empty attribute is used. At least one attribute is required. If none of the listed
+	// attributes have a value, authentication fails.
+	// The LDAP standard identity attribute is "dn".
+	ID []string `json:"id"`
+
+	// preferredUsername is the list of attributes whose values should be used as the preferred username.
+	// The LDAP standard login attribute is "uid".
+	// +optional
+	PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+	// name is the list of attributes whose values should be used as the display name. Optional.
+	// If unspecified, no display name is set for the identity.
+	// The LDAP standard display name attribute is "cn".
+	// +optional
+	Name []string `json:"name,omitempty"`
+
+	// email is the list of attributes whose values should be used as the email address. Optional.
+	// If unspecified, no email is set for the identity.
+	// +optional
+	Email []string `json:"email,omitempty"`
+}
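LDAPAttributeMapping resolves each identity field from the first listed attribute that has a non-empty value; for the id attributes, no value means authentication fails. A small sketch of that lookup (illustrative only):

package main

import "fmt"

// firstNonEmpty returns the first non-empty value among the listed
// attributes of an LDAP entry, in the order the attributes are listed.
func firstNonEmpty(entry map[string][]string, attrs []string) (string, bool) {
	for _, a := range attrs {
		for _, v := range entry[a] {
			if v != "" {
				return v, true
			}
		}
	}
	return "", false // for the id attributes, this means authentication fails
}

func main() {
	entry := map[string][]string{"uid": {"jdoe"}, "cn": {"Jane Doe"}}
	v, ok := firstNonEmpty(entry, []string{"sAMAccountName", "uid"})
	fmt.Println(v, ok) // jdoe true
}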
+
+// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
+type KeystoneIdentityProvider struct {
+	// OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
+	OAuthRemoteConnectionInfo `json:",inline"`
+
+	// domainName is required for keystone v3
+	DomainName string `json:"domainName"`
+
+	// TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
+	// useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
+	// DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
+	// +optional
+	// UseUsernameIdentity bool `json:"useUsernameIdentity"`
+}
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+type RequestHeaderIdentityProvider struct {
+	// loginURL is a URL to redirect unauthenticated /authorize requests to.
+	// Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here.
+	// ${url} is replaced with the current URL, escaped to be safe in a query parameter
+	//   https://www.example.com/sso-login?then=${url}
+	// ${query} is replaced with the current query string
+	//   https://www.example.com/auth-proxy/oauth/authorize?${query}
+	// Required when login is set to true.
+	LoginURL string `json:"loginURL"`
+
+	// challengeURL is a URL to redirect unauthenticated /authorize requests to.
+	// Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
+	// redirected here.
+	// ${url} is replaced with the current URL, escaped to be safe in a query parameter
+	//   https://www.example.com/sso-login?then=${url}
+	// ${query} is replaced with the current query string
+	//   https://www.example.com/auth-proxy/oauth/authorize?${query}
+	// Required when challenge is set to true.
+	ChallengeURL string `json:"challengeURL"`
+
+	// ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// Specifically, it allows verification of incoming requests to prevent header spoofing.
+	// The key "ca.crt" is used to locate the data.
+	// If the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// The namespace for this config map is openshift-config.
+	ClientCA ConfigMapNameReference `json:"ca"`
+
+	// clientCommonNames is an optional list of common names to require a match from. If empty, any
+	// client certificate validated against the clientCA bundle is considered authoritative.
+	// +optional
+	ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+
+	// headers is the set of headers to check for identity information
+	Headers []string `json:"headers"`
+
+	// preferredUsernameHeaders is the set of headers to check for the preferred username
+	PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+
+	// nameHeaders is the set of headers to check for the display name
+	NameHeaders []string `json:"nameHeaders"`
+
+	// emailHeaders is the set of headers to check for the email address
+	EmailHeaders []string `json:"emailHeaders"`
+}
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+type GitHubIdentityProvider struct {
+	// clientID is the oauth client ID
+	ClientID string `json:"clientID"`
+
+	// clientSecret is a required reference to the secret by name containing the oauth client secret.
+	// The key "clientSecret" is used to locate the data.
+	// If the secret or expected key is not found, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	ClientSecret SecretNameReference `json:"clientSecret"`
+
+	// organizations optionally restricts which organizations are allowed to log in
+	// +optional
+	Organizations []string `json:"organizations,omitempty"`
+
+	// teams optionally restricts which teams are allowed to log in. The format is <org>/<team>.
+	// +optional
+	Teams []string `json:"teams,omitempty"`
+
+	// hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
+	// GitHub Enterprise.
+	// It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
+	// +optional
+	Hostname string `json:"hostname"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+	// This can only be configured when hostname is set to a non-empty value.
+	// The namespace for this config map is openshift-config.
+	// +optional
+	CA ConfigMapNameReference `json:"ca"`
+}
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+type GitLabIdentityProvider struct {
+	// clientID is the oauth client ID
+	ClientID string `json:"clientID"`
+
+	// clientSecret is a required reference to the secret by name containing the oauth client secret.
+	// The key "clientSecret" is used to locate the data.
+	// If the secret or expected key is not found, the identity provider is not honored.
+	// The namespace for this secret is openshift-config.
+	ClientSecret SecretNameReference `json:"clientSecret"`
+
+	// url is the oauth server base URL
+	URL string `json:"url"`
+
+	// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+	// It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+	// The key "ca.crt" is used to locate the data.
+	// If specified and the config map or expected key is not found, the identity provider is not honored.
+	// If the specified ca data is not valid, the identity provider is not honored.
+	// If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // +optional + HostedDomain string `json:"hostedDomain"` +} + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + // +optional + ExtraScopes []string `json:"extraScopes,omitempty"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + // +optional + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` + + // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. + // It must use the https scheme with no query or fragment component. + Issuer string `json:"issuer"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// UserIDClaim is the claim used to provide a stable identifier for OIDC identities. +// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability +// +// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can +// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique +// and never reassigned within the Issuer for a particular End-User, as described in Section 2. +// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the +// iss Claim and the sub Claim." 
+const UserIDClaim = "sub"
+
+// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo
+// responses
+// +kubebuilder:validation:MinLength=1
+type OpenIDClaim string
+
+// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
+type OpenIDClaims struct {
+	// preferredUsername is the list of claims whose values should be used as the preferred username.
+	// If unspecified, the preferred username is determined from the value of the sub claim.
+	// +listType=atomic
+	// +optional
+	PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+	// name is the list of claims whose values should be used as the display name. Optional.
+	// If unspecified, no display name is set for the identity.
+	// +listType=atomic
+	// +optional
+	Name []string `json:"name,omitempty"`
+
+	// email is the list of claims whose values should be used as the email address. Optional.
+	// If unspecified, no email is set for the identity.
+	// +listType=atomic
+	// +optional
+	Email []string `json:"email,omitempty"`
+
+	// groups is the list of claims whose values should be used to synchronize groups
+	// from the OIDC provider to OpenShift for the user.
+	// If multiple claims are specified, the first one with a non-empty value is used.
+	// +listType=atomic
+	// +optional
+	Groups []OpenIDClaim `json:"groups,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []OAuth `json:"items"`
+}
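Earlier in this file, RequestHeaderIdentityProvider documents the ${url} and ${query} substitutions in loginURL and challengeURL. A sketch of that expansion, assuming a hypothetical helper name (the real OAuth server has its own plumbing):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// expandRedirect substitutes the two documented tokens: ${url} gets the
// current URL query-escaped, ${query} gets the raw query string verbatim.
func expandRedirect(template string, current *url.URL) string {
	out := strings.ReplaceAll(template, "${url}", url.QueryEscape(current.String()))
	return strings.ReplaceAll(out, "${query}", current.RawQuery)
}

func main() {
	cur, _ := url.Parse("https://api.example.com/oauth/authorize?client_id=console")
	fmt.Println(expandRedirect("https://www.example.com/sso-login?then=${url}", cur))
	fmt.Println(expandRedirect("https://www.example.com/auth-proxy/oauth/authorize?${query}", cur))
}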
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
new file mode 100644
index 0000000000000..1fddfa51e5b9d
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -0,0 +1,96 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OperatorHubSpec defines the desired state of OperatorHub
+type OperatorHubSpec struct {
+	// disableAllDefaultSources allows you to disable all the default hub
+	// sources. If this is true, a specific entry in sources can be used to
+	// enable a default source. If this is false, a specific entry in
+	// sources can be used to disable or enable a default source.
+	// +optional
+	DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
+	// sources is the list of default hub sources and their configuration.
+	// If the list is empty, it implies that the default hub sources are
+	// enabled on the cluster unless disableAllDefaultSources is true.
+	// If disableAllDefaultSources is true and sources is not empty,
+	// the configuration present in sources will take precedence. The list of
+	// default hub sources and their current state will always be reflected in
+	// the status block.
+	// +optional
+	Sources []HubSource `json:"sources,omitempty"`
+}
+
+// OperatorHubStatus defines the observed state of OperatorHub. The current
+// state of the default hub sources will always be reflected here.
+type OperatorHubStatus struct {
+	// sources encapsulates the result of applying the configuration for each
+	// hub source
+	Sources []HubSourceStatus `json:"sources,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHub is the Schema for the operatorhubs API. It can be used to change
+// the state of the default hub sources for OperatorHub on the cluster from
+// enabled to disabled and vice versa.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=operatorhubs,scope=Cluster
+// +kubebuilder:subresource:status
+// +genclient
+// +genclient:nonNamespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=marketplace,operatorOrdering=01
+// +openshift:capability=marketplace
+// +openshift:compatibility-gen:level=1
+type OperatorHub struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec   OperatorHubSpec   `json:"spec"`
+	Status OperatorHubStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHubList contains a list of OperatorHub
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OperatorHubList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+	Items           []OperatorHub `json:"items"`
+}
+
+// HubSource is used to specify the hub source and its configuration
+type HubSource struct {
+	// name is the name of one of the default hub sources
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:Required
+	Name string `json:"name"`
+	// disabled is used to disable a default hub source on the cluster
+	// +kubebuilder:Required
+	Disabled bool `json:"disabled"`
+}
+
+// HubSourceStatus is used to reflect the current state of applying the
+// configuration to a default source
+type HubSourceStatus struct {
+	HubSource `json:",omitempty"`
+	// status indicates success or failure in applying the configuration
+	Status string `json:"status,omitempty"`
+	// message provides more information regarding failures
+	Message string `json:"message,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
new file mode 100644
index 0000000000000..3d219862be362
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -0,0 +1,70 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds cluster-wide information about Project. The canonical name is `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=projects,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Project struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ProjectSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProjectStatus `json:"status"` +} + +// TemplateReference references a template in a specific namespace. +// The namespace must be specified at the point of use. +type TemplateReference struct { + // name is the metadata.name of the referenced project request template + Name string `json:"name"` +} + +// ProjectSpec holds the project creation configuration. +type ProjectSpec struct { + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + // +optional + ProjectRequestMessage string `json:"projectRequestMessage"` + + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. + // This must point to a template in 'openshift-config' namespace. It is optional. + // If it is not specified, a default template is used. + // + // +optional + ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"` +} + +type ProjectStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Project `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go new file mode 100644 index 0000000000000..ed40176ce364e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -0,0 +1,110 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=proxies,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type Proxy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user-settable values for the proxy configuration + // +required + Spec ProxySpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProxyStatus `json:"status"` +} + +// ProxySpec contains cluster proxy creation configuration. +type ProxySpec struct { + // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. + // Empty means unset and will not result in an env var. + // +optional + NoProxy string `json:"noProxy,omitempty"` + + // readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + // +optional + ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` + + // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. + // The trustedCA field should only be consumed by a proxy validator. The + // validator is responsible for reading the certificate bundle from the required + // key "ca-bundle.crt", merging it with the system default trust bundle, + // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" + // in the "openshift-config-managed" namespace. Clients that expect to make + // proxy connections must use the trusted-ca-bundle for all HTTPS requests to + // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as + // well. + // + // The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". Here is an example ConfigMap (in yaml): + // + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: user-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // Custom CA certificate bundle. + // -----END CERTIFICATE----- + // + // +optional + TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"` +} + +// ProxyStatus shows current known state of the cluster proxy. +type ProxyStatus struct { + // httpProxy is the URL of the proxy for HTTP requests. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. 
+	// +optional
+	NoProxy string `json:"noProxy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProxyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Proxy `json:"items"`
+}
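noProxy above is a comma-separated mix of hostnames, IPs and CIDRs. A hedged sketch of the bypass decision; real consumers usually rely on a library such as golang.org/x/net/http/httpproxy, so this only shows the shape of the check:

package main

import (
	"fmt"
	"net"
	"strings"
)

// bypassProxy reports whether host matches any noProxy entry: an exact
// hostname, a domain suffix, or a CIDR containing the host's IP.
func bypassProxy(noProxy, host string) bool {
	ip := net.ParseIP(host)
	for _, entry := range strings.Split(noProxy, ",") {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}
		if _, cidr, err := net.ParseCIDR(entry); err == nil {
			if ip != nil && cidr.Contains(ip) {
				return true
			}
			continue
		}
		if host == entry || strings.HasSuffix(host, "."+strings.TrimPrefix(entry, ".")) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(bypassProxy("10.0.0.0/8,.cluster.local,registry.example.com", "image-registry.cluster.local")) // true
	fmt.Println(bypassProxy("10.0.0.0/8", "10.1.2.3"))                                                         // true
}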
+
+type SchedulerSpec struct {
+ // DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release.
+ // policy is a reference to a ConfigMap containing a scheduler policy with
+ // user-specified predicates and priorities. If this ConfigMap is not available,
+ // the scheduler will default to using DefaultAlgorithmProvider.
+ // The namespace for this configmap is openshift-config.
+ // +optional
+ Policy ConfigMapNameReference `json:"policy,omitempty"`
+ // profile sets which scheduling profile should be used to configure scheduling
+ // decisions for new pods.
+ //
+ // Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring".
+ // Defaults to "LowNodeUtilization".
+ // +optional
+ Profile SchedulerProfile `json:"profile,omitempty"`
+ // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.
+ // +openshift:enable:FeatureGate=DynamicResourceAllocation
+ // +optional
+ ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"`
+ // defaultNodeSelector helps set the cluster-wide default node selector to
+ // restrict pod placement to specific nodes. This is applied to the pods
+ // created in all namespaces and creates an intersection with any existing
+ // nodeSelectors already set on a pod, additionally constraining that pod's selector.
+ // For example,
+ // defaultNodeSelector: "type=user-node,region=east" would set the nodeSelector
+ // field in the pod spec to "type=user-node,region=east" for all pods created
+ // in all namespaces. Namespaces having project-wide node selectors won't be
+ // impacted even if this field is set. This adds an annotation section to
+ // the namespace.
+ // For example, if a new namespace is created with
+ // node-selector='type=user-node,region=east',
+ // the annotation openshift.io/node-selector: type=user-node,region=east
+ // gets added to the project. When the openshift.io/node-selector annotation
+ // is set on the project, its value is used in preference to the value of the
+ // defaultNodeSelector field.
+ // For instance,
+ // openshift.io/node-selector: "type=user-node,region=west" means
+ // that the default of "type=user-node,region=east" set in defaultNodeSelector
+ // would not be applied.
+ // +optional
+ DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+ // mastersSchedulable allows master nodes to be schedulable. When this flag is
+ // turned on, all the master nodes in the cluster will be made schedulable,
+ // so that workload pods can run on them. The default value for this field is false,
+ // meaning none of the master nodes are schedulable.
+ // Important Note: Once the workload pods start running on the master nodes,
+ // extreme care must be taken to ensure that cluster-critical control plane components
+ // are not impacted.
+ // Please turn on this field only after doing due diligence.
+ // +optional
+ MastersSchedulable bool `json:"mastersSchedulable"`
+}
+
+// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring
+type SchedulerProfile string
+
+var (
+ // LowNodeUtilization is the default, and defines a scheduling profile which prefers to
+ // spread pods evenly among nodes, targeting low resource consumption on each node.
+ LowNodeUtilization SchedulerProfile = "LowNodeUtilization"
+
+ // HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto
+ // as few nodes as possible, targeting a small node count but high resource usage on each node.
+ HighNodeUtilization SchedulerProfile = "HighNodeUtilization"
+
+ // NoScoring defines a scheduling profile which tries to provide lower-latency scheduling
+ // at the expense of potentially less optimal pod placement decisions.
+ NoScoring SchedulerProfile = "NoScoring"
+)
+
+// ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles
+type ProfileCustomizations struct {
+ // dynamicResourceAllocation allows enabling or disabling dynamic resource allocation within the scheduler.
+ // Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod.
+ // Third-party resource drivers are responsible for tracking and allocating resources.
+ // Different kinds of resources support arbitrary parameters for defining requirements and initialization.
+ // Valid values are Enabled, Disabled and omitted.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default,
+ // which is subject to change over time.
+ // The current default is Disabled.
+ // +optional
+ DynamicResourceAllocation DRAEnablement `json:"dynamicResourceAllocation"`
+}
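A minimal sketch combining the fields documented above: a cluster-wide default node selector plus dynamic resource allocation opted in. The selector value is illustrative, and the "Enabled" literal matches the DRAEnablementEnabled constant defined just below.

package main

import configv1 "github.com/openshift/api/config/v1"

// exampleSchedulerSpec builds an illustrative SchedulerSpec.
func exampleSchedulerSpec() configv1.SchedulerSpec {
	return configv1.SchedulerSpec{
		// Intersected with each pod's own nodeSelector in namespaces that
		// lack a project-wide node selector.
		DefaultNodeSelector: "type=user-node,region=east",
		ProfileCustomizations: configv1.ProfileCustomizations{
			// Equivalent to configv1.DRAEnablementEnabled.
			DynamicResourceAllocation: "Enabled",
		},
	}
}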
+
+// +kubebuilder:validation:Enum:="";"Enabled";"Disabled"
+type DRAEnablement string
+
+var (
+ // DRAEnablementEnabled enables the dynamic resource allocation feature
+ DRAEnablementEnabled DRAEnablement = "Enabled"
+ // DRAEnablementDisabled disables the dynamic resource allocation feature
+ DRAEnablementDisabled DRAEnablement = "Disabled"
+)
+
+type SchedulerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type SchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Scheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go
new file mode 100644
index 0000000000000..00953957f4987
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go
@@ -0,0 +1,45 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into
+// the payload for later analysis on a per-payload basis.
+// This doesn't need any CRD because it's never stored in the cluster.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+type TestReporting struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +required
+ Spec TestReportingSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status TestReportingStatus `json:"status"`
+}
+
+type TestReportingSpec struct {
+ // testsForFeatureGates is a list, indexed by FeatureGate, that includes information about testing.
+ TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"`
+}
+
+type FeatureGateTests struct {
+ // featureGate is the name of the FeatureGate as it appears in the FeatureGate CR instance.
+ FeatureGate string `json:"featureGate"`
+
+ // tests contains an item for every TestName
+ Tests []TestDetails `json:"tests"`
+}
+
+type TestDetails struct {
+ // testName is the name of the test as it appears in junit XMLs.
+ // It does not include the suite name since the same test can be executed in many suites.
+ TestName string `json:"testName"`
+}
+
+type TestReportingStatus struct {
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
new file mode 100644
index 0000000000000..b18ef647c2fca
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
@@ -0,0 +1,312 @@
+package v1
+
+// TLSSecurityProfile defines the schema for a TLS security profile.
This object +// is used by operators to apply TLS security settings to operands. +// +union +type TLSSecurityProfile struct { + // type is one of Old, Intermediate, Modern or Custom. Custom provides + // the ability to specify individual TLS security profile parameters. + // Old, Intermediate and Modern are TLS security profiles based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + // + // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers + // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be + // reduced. + // + // Note that the Modern profile is currently not supported because it is not + // yet well adopted by common software libraries. + // + // +unionDiscriminator + // +optional + Type TLSProfileType `json:"type"` + // old is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // + // - TLS_AES_128_GCM_SHA256 + // + // - TLS_AES_256_GCM_SHA384 + // + // - TLS_CHACHA20_POLY1305_SHA256 + // + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // + // - ECDHE-RSA-AES128-GCM-SHA256 + // + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // + // - ECDHE-RSA-AES256-GCM-SHA384 + // + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // + // - ECDHE-RSA-CHACHA20-POLY1305 + // + // - DHE-RSA-AES128-GCM-SHA256 + // + // - DHE-RSA-AES256-GCM-SHA384 + // + // - DHE-RSA-CHACHA20-POLY1305 + // + // - ECDHE-ECDSA-AES128-SHA256 + // + // - ECDHE-RSA-AES128-SHA256 + // + // - ECDHE-ECDSA-AES128-SHA + // + // - ECDHE-RSA-AES128-SHA + // + // - ECDHE-ECDSA-AES256-SHA384 + // + // - ECDHE-RSA-AES256-SHA384 + // + // - ECDHE-ECDSA-AES256-SHA + // + // - ECDHE-RSA-AES256-SHA + // + // - DHE-RSA-AES128-SHA256 + // + // - DHE-RSA-AES256-SHA256 + // + // - AES128-GCM-SHA256 + // + // - AES256-GCM-SHA384 + // + // - AES128-SHA256 + // + // - AES256-SHA256 + // + // - AES128-SHA + // + // - AES256-SHA + // + // - DES-CBC3-SHA + // + // minTLSVersion: VersionTLS10 + // + // +optional + // +nullable + Old *OldTLSProfile `json:"old,omitempty"` + // intermediate is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + // + // and looks like this (yaml): + // + // ciphers: + // + // - TLS_AES_128_GCM_SHA256 + // + // - TLS_AES_256_GCM_SHA384 + // + // - TLS_CHACHA20_POLY1305_SHA256 + // + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // + // - ECDHE-RSA-AES128-GCM-SHA256 + // + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // + // - ECDHE-RSA-AES256-GCM-SHA384 + // + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // + // - ECDHE-RSA-CHACHA20-POLY1305 + // + // - DHE-RSA-AES128-GCM-SHA256 + // + // - DHE-RSA-AES256-GCM-SHA384 + // + // minTLSVersion: VersionTLS12 + // + // +optional + // +nullable + Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` + // modern is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // + // - TLS_AES_128_GCM_SHA256 + // + // - TLS_AES_256_GCM_SHA384 + // + // - TLS_CHACHA20_POLY1305_SHA256 + // + // minTLSVersion: VersionTLS13 + // + // +optional + // +nullable + Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. 
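A minimal sketch of the union contract described above: set the discriminator and populate exactly the matching member. The profile types and constants referenced here are defined later in this file.

package main

import configv1 "github.com/openshift/api/config/v1"

// intermediateProfile selects the Mozilla intermediate profile.
func intermediateProfile() *configv1.TLSSecurityProfile {
	return &configv1.TLSSecurityProfile{
		Type:         configv1.TLSProfileIntermediateType,
		Intermediate: &configv1.IntermediateTLSProfile{},
	}
}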
Be extremely careful using a custom + // profile as invalid configurations can be catastrophic. An example custom profile + // looks like this: + // + // ciphers: + // + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // + // - ECDHE-RSA-CHACHA20-POLY1305 + // + // - ECDHE-RSA-AES128-GCM-SHA256 + // + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // + // minTLSVersion: VersionTLS11 + // + // +optional + // +nullable + Custom *CustomTLSProfile `json:"custom,omitempty"` +} + +// OldTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +type OldTLSProfile struct{} + +// IntermediateTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +type IntermediateTLSProfile struct{} + +// ModernTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +type ModernTLSProfile struct{} + +// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful +// using a custom TLS profile as invalid configurations can be catastrophic. +type CustomTLSProfile struct { + TLSProfileSpec `json:",inline"` +} + +// TLSProfileType defines a TLS security profile type. +// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom +type TLSProfileType string + +const ( + // Old is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + TLSProfileOldType TLSProfileType = "Old" + // Intermediate is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + TLSProfileIntermediateType TLSProfileType = "Intermediate" + // Modern is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + TLSProfileModernType TLSProfileType = "Modern" + // Custom is a TLS security profile that allows for user-defined parameters. + TLSProfileCustomType TLSProfileType = "Custom" +) + +// TLSProfileSpec is the desired behavior of a TLSSecurityProfile. +type TLSProfileSpec struct { + // ciphers is used to specify the cipher algorithms that are negotiated + // during the TLS handshake. Operators may remove entries their operands + // do not support. For example, to use DES-CBC3-SHA (yaml): + // + // ciphers: + // - DES-CBC3-SHA + // + // +listType=atomic + Ciphers []string `json:"ciphers"` + // minTLSVersion is used to specify the minimal version of the TLS protocol + // that is negotiated during the TLS handshake. For example, to use TLS + // versions 1.1, 1.2 and 1.3 (yaml): + // + // minTLSVersion: VersionTLS11 + // + // NOTE: currently the highest minTLSVersion allowed is VersionTLS12 + // + MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` +} + +// TLSProtocolVersion is a way to specify the protocol version used for TLS connections. +// Protocol versions are based on the following most common TLS configurations: +// +// https://ssl-config.mozilla.org/ +// +// Note that SSLv3.0 is not a supported protocol version due to well known +// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE +// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13 +type TLSProtocolVersion string + +const ( + // VersionTLSv10 is version 1.0 of the TLS security protocol. + VersionTLS10 TLSProtocolVersion = "VersionTLS10" + // VersionTLSv11 is version 1.1 of the TLS security protocol. 
+ VersionTLS11 TLSProtocolVersion = "VersionTLS11" + // VersionTLSv12 is version 1.2 of the TLS security protocol. + VersionTLS12 TLSProtocolVersion = "VersionTLS12" + // VersionTLSv13 is version 1.3 of the TLS security protocol. + VersionTLS13 TLSProtocolVersion = "VersionTLS13" +) + +// TLSProfiles Contains a map of TLSProfileType names to TLSProfileSpec. +// +// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all +// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail, +// just be sure to whitelist only and everything will be ok. +var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ + TLSProfileOldType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + "DHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "DHE-RSA-AES128-SHA256", + "DHE-RSA-AES256-SHA256", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA256", + "AES256-SHA256", + "AES128-SHA", + "AES256-SHA", + "DES-CBC3-SHA", + }, + MinTLSVersion: VersionTLS10, + }, + TLSProfileIntermediateType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + }, + MinTLSVersion: VersionTLS12, + }, + TLSProfileModernType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + MinTLSVersion: VersionTLS13, + }, +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..b013d4595ed31 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,6109 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. +func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
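A sketch of how a consumer might resolve an effective TLSProfileSpec using the TLSProfiles map above. The fallback to Intermediate when nothing is set is an assumption for illustration; individual operators pick their own defaults.

package main

import configv1 "github.com/openshift/api/config/v1"

// effectiveSpec returns the cipher/version spec to apply for a profile:
// the custom spec for Custom, otherwise the predefined TLSProfiles entry.
func effectiveSpec(p *configv1.TLSSecurityProfile) *configv1.TLSProfileSpec {
	if p == nil {
		// Assumed default for this sketch.
		return configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
	}
	if p.Type == configv1.TLSProfileCustomType {
		if p.Custom == nil {
			return nil
		}
		return &p.Custom.TLSProfileSpec
	}
	return configv1.TLSProfiles[p.Type]
}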
+func (in *APIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption. +func (in *APIServerEncryption) DeepCopy() *APIServerEncryption { + if in == nil { + return nil + } + out := new(APIServerEncryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerList) DeepCopyInto(out *APIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList. +func (in *APIServerList) DeepCopy() *APIServerList { + if in == nil { + return nil + } + out := new(APIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ServingCertificate = in.ServingCertificate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert. +func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert { + if in == nil { + return nil + } + out := new(APIServerNamedServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) { + *out = *in + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]APIServerNamedServingCert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts. +func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts { + if in == nil { + return nil + } + out := new(APIServerServingCerts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
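A short sketch of what DeepCopyObject is for: generic machinery (schemes, caches, informers) traffics in runtime.Object, and this method lets such code copy an object without knowing its concrete type. The mutate callback is a hypothetical stand-in for caller logic.

package main

import "k8s.io/apimachinery/pkg/runtime"

// withSafeMutation copies a shared object before handing it to a mutator,
// so the shared instance is never modified.
func withSafeMutation(shared runtime.Object, mutate func(runtime.Object)) runtime.Object {
	own := shared.DeepCopyObject() // leave the shared instance untouched
	mutate(own)
	return own
}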
+func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { + *out = *in + in.ServingCerts.DeepCopyInto(&out.ServingCerts) + out.ClientCA = in.ClientCA + if in.AdditionalCORSAllowedOrigins != nil { + in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Encryption = in.Encryption + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + in.Audit.DeepCopyInto(&out.Audit) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec. +func (in *APIServerSpec) DeepCopy() *APIServerSpec { + if in == nil { + return nil + } + out := new(APIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus. +func (in *APIServerStatus) DeepCopy() *APIServerStatus { + if in == nil { + return nil + } + out := new(APIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSDNSSpec) DeepCopyInto(out *AWSDNSSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSSpec. +func (in *AWSDNSSpec) DeepCopy() *AWSDNSSpec { + if in == nil { + return nil + } + out := new(AWSDNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSIngressSpec) DeepCopyInto(out *AWSIngressSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIngressSpec. +func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec { + if in == nil { + return nil + } + out := new(AWSIngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. +func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { + if in == nil { + return nil + } + out := new(AWSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
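An illustrative sketch of why the generated copies matter: plain struct assignment shares slice backing arrays, while DeepCopy rebuilds them. The mutation values are arbitrary.

package main

import configv1 "github.com/openshift/api/config/v1"

// aliasingDemo contrasts a shallow struct copy with a generated deep copy.
func aliasingDemo(spec *configv1.APIServerSpec) {
	shallow := *spec        // copies the struct, but shares slice storage
	deep := spec.DeepCopy() // rebuilds AdditionalCORSAllowedOrigins
	if len(spec.AdditionalCORSAllowedOrigins) > 0 {
		shallow.AdditionalCORSAllowedOrigins[0] = "mutated"  // also visible via spec
		deep.AdditionalCORSAllowedOrigins[0] = "isolated"    // private to the copy
	}
	_ = deep
}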
+func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. +func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { + if in == nil { + return nil + } + out := new(AWSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. +func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. +func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { + if in == nil { + return nil + } + out := new(AWSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]AdmissionPluginConfig, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.EnabledAdmissionPlugins != nil { + in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAdmissionPlugins != nil { + in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. +func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec. +func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AlibabaCloudResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus. +func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag. +func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag { + if in == nil { + return nil + } + out := new(AlibabaCloudResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Audit) DeepCopyInto(out *Audit) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]AuditCustomRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. +func (in *Audit) DeepCopy() *Audit { + if in == nil { + return nil + } + out := new(Audit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule. +func (in *AuditCustomRule) DeepCopy() *AuditCustomRule { + if in == nil { + return nil + } + out := new(AuditCustomRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
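A sketch of the nil-handling contract the generated methods follow, using Audit from above: DeepCopy guards against a nil receiver and returns nil, DeepCopyInto requires non-nil arguments, and nil slices or maps stay nil in the copy rather than becoming empty.

package main

import configv1 "github.com/openshift/api/config/v1"

// nilSafety exercises the generated nil guards; it returns true.
func nilSafety() bool {
	var missing *configv1.Audit
	if missing.DeepCopy() != nil { // safe: the method checks in == nil
		return false
	}
	empty := &configv1.Audit{}                 // CustomRules is a nil slice here
	return empty.DeepCopy().CustomRules == nil // the copy keeps it nil
}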
+func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + out.OAuthMetadata = in.OAuthMetadata + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + if in.WebhookTokenAuthenticator != nil { + in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator + *out = new(WebhookTokenAuthenticator) + **out = **in + } + if in.OIDCProviders != nil { + in, out := &in.OIDCProviders, &out.OIDCProviders + *out = make([]OIDCProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. 
+func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. +func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { + if in == nil { + return nil + } + out := new(AzurePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AzureResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus. +func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { + if in == nil { + return nil + } + out := new(AzurePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureResourceTag) DeepCopyInto(out *AzureResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceTag. +func (in *AzureResourceTag) DeepCopy() *AzureResourceTag { + if in == nil { + return nil + } + out := new(AzureResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformLoadBalancer) DeepCopyInto(out *BareMetalPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformLoadBalancer. +func (in *BareMetalPlatformLoadBalancer) DeepCopy() *BareMetalPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(BareMetalPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec. +func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec { + if in == nil { + return nil + } + out := new(BareMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(BareMetalPlatformLoadBalancer) + **out = **in + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus. +func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus { + if in == nil { + return nil + } + out := new(BareMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider. +func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) { + *out = *in + if in.DefaultProxy != nil { + in, out := &in.DefaultProxy, &out.DefaultProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.GitProxy != nil { + in, out := &in.GitProxy, &out.GitProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults. +func (in *BuildDefaults) DeepCopy() *BuildDefaults { + if in == nil { + return nil + } + out := new(BuildDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
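The generator uses two pointer idioms visible above: a pointer to a flat struct is copied with **out = **in (as for LoadBalancer in BareMetalPlatformStatus), while a pointer to a struct that itself holds slices or pointers recurses via DeepCopyInto (as for the ProxySpec fields in BuildDefaults). A hand-written sketch with hypothetical types:

package main

// flat has no reference fields; nested does.
type flat struct{ A string }
type nested struct{ Items []string }

// DeepCopyInto mirrors the generated recursion for reference fields.
func (in *nested) DeepCopyInto(out *nested) {
	*out = *in
	if in.Items != nil {
		out.Items = make([]string, len(in.Items))
		copy(out.Items, in.Items)
	}
}

// copyPointers applies the idiom matching each field shape.
func copyPointers(f *flat, n *nested) (*flat, *nested) {
	var fc *flat
	if f != nil {
		fc = new(flat)
		*fc = *f // flat value: shallow assignment is already deep
	}
	var nc *nested
	if n != nil {
		nc = new(nested)
		n.DeepCopyInto(nc) // recursion needed for the slice
	}
	return fc, nc
}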
+func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) { + *out = *in + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForcePull != nil { + in, out := &in.ForcePull, &out.ForcePull + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides. +func (in *BuildOverrides) DeepCopy() *BuildOverrides { + if in == nil { + return nil + } + out := new(BuildOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.BuildDefaults.DeepCopyInto(&out.BuildDefaults) + in.BuildOverrides.DeepCopyInto(&out.BuildOverrides) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertInfo) DeepCopyInto(out *CertInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. +func (in *CertInfo) DeepCopy() *CertInfo { + if in == nil { + return nil + } + out := new(CertInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. +func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { + if in == nil { + return nil + } + out := new(ClientConnectionOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CloudControllerManagerStatus) DeepCopyInto(out *CloudControllerManagerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerStatus. +func (in *CloudControllerManagerStatus) DeepCopy() *CloudControllerManagerStatus { + if in == nil { + return nil + } + out := new(CloudControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudLoadBalancerConfig) DeepCopyInto(out *CloudLoadBalancerConfig) { + *out = *in + if in.ClusterHosted != nil { + in, out := &in.ClusterHosted, &out.ClusterHosted + *out = new(CloudLoadBalancerIPs) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerConfig. +func (in *CloudLoadBalancerConfig) DeepCopy() *CloudLoadBalancerConfig { + if in == nil { + return nil + } + out := new(CloudLoadBalancerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudLoadBalancerIPs) DeepCopyInto(out *CloudLoadBalancerIPs) { + *out = *in + if in.APIIntLoadBalancerIPs != nil { + in, out := &in.APIIntLoadBalancerIPs, &out.APIIntLoadBalancerIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.APILoadBalancerIPs != nil { + in, out := &in.APILoadBalancerIPs, &out.APILoadBalancerIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressLoadBalancerIPs != nil { + in, out := &in.IngressLoadBalancerIPs, &out.IngressLoadBalancerIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerIPs. +func (in *CloudLoadBalancerIPs) DeepCopy() *CloudLoadBalancerIPs { + if in == nil { + return nil + } + out := new(CloudLoadBalancerIPs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + if in.PromQL != nil { + in, out := &in.PromQL, &out.PromQL + *out = new(PromQLClusterCondition) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator. +func (in *ClusterOperator) DeepCopy() *ClusterOperator { + if in == nil { + return nil + } + out := new(ClusterOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList. +func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList { + if in == nil { + return nil + } + out := new(ClusterOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec. +func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec { + if in == nil { + return nil + } + out := new(ClusterOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]OperandVersion, len(*in)) + copy(*out, *in) + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus. +func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition. +func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition { + if in == nil { + return nil + } + out := new(ClusterOperatorStatusCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. +func (in *ClusterVersion) DeepCopy() *ClusterVersion { + if in == nil { + return nil + } + out := new(ClusterVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionCapabilitiesSpec) DeepCopyInto(out *ClusterVersionCapabilitiesSpec) { + *out = *in + if in.AdditionalEnabledCapabilities != nil { + in, out := &in.AdditionalEnabledCapabilities, &out.AdditionalEnabledCapabilities + *out = make([]ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesSpec. +func (in *ClusterVersionCapabilitiesSpec) DeepCopy() *ClusterVersionCapabilitiesSpec { + if in == nil { + return nil + } + out := new(ClusterVersionCapabilitiesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionCapabilitiesStatus) DeepCopyInto(out *ClusterVersionCapabilitiesStatus) { + *out = *in + if in.EnabledCapabilities != nil { + in, out := &in.EnabledCapabilities, &out.EnabledCapabilities + *out = make([]ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + if in.KnownCapabilities != nil { + in, out := &in.KnownCapabilities, &out.KnownCapabilities + *out = make([]ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesStatus. +func (in *ClusterVersionCapabilitiesStatus) DeepCopy() *ClusterVersionCapabilitiesStatus { + if in == nil { + return nil + } + out := new(ClusterVersionCapabilitiesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList. +func (in *ClusterVersionList) DeepCopy() *ClusterVersionList { + if in == nil { + return nil + } + out := new(ClusterVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { + *out = *in + if in.DesiredUpdate != nil { + in, out := &in.DesiredUpdate, &out.DesiredUpdate + *out = new(Update) + **out = **in + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(ClusterVersionCapabilitiesSpec) + (*in).DeepCopyInto(*out) + } + if in.SignatureStores != nil { + in, out := &in.SignatureStores, &out.SignatureStores + *out = make([]SignatureStore, len(*in)) + copy(*out, *in) + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ComponentOverride, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec. +func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec { + if in == nil { + return nil + } + out := new(ClusterVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { + *out = *in + in.Desired.DeepCopyInto(&out.Desired) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]UpdateHistory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Capabilities.DeepCopyInto(&out.Capabilities) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailableUpdates != nil { + in, out := &in.AvailableUpdates, &out.AvailableUpdates + *out = make([]Release, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConditionalUpdates != nil { + in, out := &in.ConditionalUpdates, &out.ConditionalUpdates + *out = make([]ConditionalUpdate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus. +func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { + if in == nil { + return nil + } + out := new(ClusterVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
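A common consumer pattern these methods enable (sketch): objects read from an informer cache are shared, so a controller deep-copies before editing status. The condition values here are purely illustrative.

package main

import configv1 "github.com/openshift/api/config/v1"

// updateConditions returns a privately owned, modified copy of a cached object.
func updateConditions(cached *configv1.ClusterVersion) *configv1.ClusterVersion {
	cv := cached.DeepCopy() // never mutate the cache's instance
	cv.Status.Conditions = append(cv.Status.Conditions, configv1.ClusterOperatorStatusCondition{
		Type:   "RetrievedUpdates", // illustrative condition type
		Status: "True",
	})
	return cv
}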
+func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride. +func (in *ComponentOverride) DeepCopy() *ComponentOverride { + if in == nil { + return nil + } + out := new(ComponentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) { + *out = *in + out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec. +func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec { + if in == nil { + return nil + } + out := new(ComponentRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) { + *out = *in + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.CurrentHostnames != nil { + in, out := &in.CurrentHostnames, &out.CurrentHostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus. +func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { + if in == nil { + return nil + } + out := new(ComponentRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { + *out = *in + in.Release.DeepCopyInto(&out.Release) + if in.Risks != nil { + in, out := &in.Risks, &out.Risks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate. +func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { + if in == nil { + return nil + } + out := new(ConditionalUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { + *out = *in + if in.MatchingRules != nil { + in, out := &in.MatchingRules, &out.MatchingRules + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk. 
+func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk { + if in == nil { + return nil + } + out := new(ConditionalUpdateRisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. +func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { + if in == nil { + return nil + } + out := new(ConfigMapFileReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference. +func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference { + if in == nil { + return nil + } + out := new(ConfigMapNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication. +func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication { + if in == nil { + return nil + } + out := new(ConsoleAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + out.Authentication = in.Authentication + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]FeatureGateName, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]FeatureGateName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates. +func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates { + if in == nil { + return nil + } + out := new(CustomFeatureGates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { + *out = *in + in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. +func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { + if in == nil { + return nil + } + out := new(CustomTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. 
+func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSPlatformSpec) DeepCopyInto(out *DNSPlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSDNSSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSPlatformSpec. +func (in *DNSPlatformSpec) DeepCopy() *DNSPlatformSpec { + if in == nil { + return nil + } + out := new(DNSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.PublicZone != nil { + in, out := &in.PublicZone, &out.PublicZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + if in.PrivateZone != nil { + in, out := &in.PrivateZone, &out.PrivateZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + in.Platform.DeepCopyInto(&out.Platform) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZone) DeepCopyInto(out *DNSZone) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. +func (in *DNSZone) DeepCopy() *DNSZone { + if in == nil { + return nil + } + out := new(DNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. +func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { + if in == nil { + return nil + } + out := new(DelegatedAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. +func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { + if in == nil { + return nil + } + out := new(DelegatedAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(DeprecatedWebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec. +func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus. +func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { + *out = *in + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { + if in == nil { + return nil + } + out := new(EtcdConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { + *out = *in + in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { + if in == nil { + return nil + } + out := new(EtcdStorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ExternalIPPolicy) + (*in).DeepCopyInto(*out) + } + if in.AutoAssignCIDRs != nil { + in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig. +func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig { + if in == nil { + return nil + } + out := new(ExternalIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) { + *out = *in + if in.AllowedCIDRs != nil { + in, out := &in.AllowedCIDRs, &out.AllowedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RejectedCIDRs != nil { + in, out := &in.RejectedCIDRs, &out.RejectedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy. +func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy { + if in == nil { + return nil + } + out := new(ExternalIPPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalPlatformSpec) DeepCopyInto(out *ExternalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformSpec. +func (in *ExternalPlatformSpec) DeepCopy() *ExternalPlatformSpec { + if in == nil { + return nil + } + out := new(ExternalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalPlatformStatus) DeepCopyInto(out *ExternalPlatformStatus) { + *out = *in + out.CloudControllerManager = in.CloudControllerManager + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformStatus. +func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus { + if in == nil { + return nil + } + out := new(ExternalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate. +func (in *FeatureGate) DeepCopy() *FeatureGate { + if in == nil { + return nil + } + out := new(FeatureGate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureGateAttributes) DeepCopyInto(out *FeatureGateAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateAttributes. +func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes { + if in == nil { + return nil + } + out := new(FeatureGateAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]FeatureGateAttributes, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]FeatureGateAttributes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDetails. +func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails { + if in == nil { + return nil + } + out := new(FeatureGateDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FeatureGate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList. +func (in *FeatureGateList) DeepCopy() *FeatureGateList { + if in == nil { + return nil + } + out := new(FeatureGateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) { + *out = *in + if in.CustomNoUpgrade != nil { + in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade + *out = new(CustomFeatureGates) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection. +func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection { + if in == nil { + return nil + } + out := new(FeatureGateSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) { + *out = *in + in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec. +func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { + if in == nil { + return nil + } + out := new(FeatureGateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]FeatureGateDetails, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus. +func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { + if in == nil { + return nil + } + out := new(FeatureGateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateTests) DeepCopyInto(out *FeatureGateTests) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]TestDetails, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateTests. +func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { + if in == nil { + return nil + } + out := new(FeatureGateTests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec. +func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec { + if in == nil { + return nil + } + out := new(GCPPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { + *out = *in + if in.ResourceLabels != nil { + in, out := &in.ResourceLabels, &out.ResourceLabels + *out = make([]GCPResourceLabel, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]GCPResourceTag, len(*in)) + copy(*out, *in) + } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus. +func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus { + if in == nil { + return nil + } + out := new(GCPPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPResourceLabel) DeepCopyInto(out *GCPResourceLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceLabel. +func (in *GCPResourceLabel) DeepCopy() *GCPResourceLabel { + if in == nil { + return nil + } + out := new(GCPResourceLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCPResourceTag) DeepCopyInto(out *GCPResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceTag. +func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { + if in == nil { + return nil + } + out := new(GCPResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + in.StorageConfig.DeepCopyInto(&out.StorageConfig) + in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) + out.KubeClientConfig = in.KubeClientConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. +func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { + if in == nil { + return nil + } + out := new(GenericAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + out.LeaderElection = in.LeaderElection + out.Authentication = in.Authentication + out.Authorization = in.Authorization + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig. +func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig { + if in == nil { + return nil + } + out := new(GenericControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) { + *out = *in + out.FileData = in.FileData + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider. +func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. +func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { + if in == nil { + return nil + } + out := new(HTTPServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSource) DeepCopyInto(out *HubSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource. +func (in *HubSource) DeepCopy() *HubSource { + if in == nil { + return nil + } + out := new(HubSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) { + *out = *in + out.HubSource = in.HubSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus. +func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { + if in == nil { + return nil + } + out := new(HubSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec. +func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec { + if in == nil { + return nil + } + out := new(IBMCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]IBMCloudServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus. 
+func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus { + if in == nil { + return nil + } + out := new(IBMCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudServiceEndpoint) DeepCopyInto(out *IBMCloudServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudServiceEndpoint. +func (in *IBMCloudServiceEndpoint) DeepCopy() *IBMCloudServiceEndpoint { + if in == nil { + return nil + } + out := new(IBMCloudServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthIdentityProvider) + **out = **in + } + if in.GitHub != nil { + in, out := &in.GitHub, &out.GitHub + *out = new(GitHubIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.GitLab != nil { + in, out := &in.GitLab, &out.GitLab + *out = new(GitLabIdentityProvider) + **out = **in + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleIdentityProvider) + **out = **in + } + if in.HTPasswd != nil { + in, out := &in.HTPasswd, &out.HTPasswd + *out = new(HTPasswdIdentityProvider) + **out = **in + } + if in.Keystone != nil { + in, out := &in.Keystone, &out.Keystone + *out = new(KeystoneIdentityProvider) + **out = **in + } + if in.LDAP != nil { + in, out := &in.LDAP, &out.LDAP + *out = new(LDAPIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.OpenID != nil { + in, out := &in.OpenID, &out.OpenID + *out = new(OpenIDIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderIdentityProvider) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig. +func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig { + if in == nil { + return nil + } + out := new(IdentityProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. 
+func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy. +func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy { + if in == nil { + return nil + } + out := new(ImageContentPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageContentPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList. +func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList { + if in == nil { + return nil + } + out := new(ImageContentPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) { + *out = *in + if in.RepositoryDigestMirrors != nil { + in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors + *out = make([]RepositoryDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec. +func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec { + if in == nil { + return nil + } + out := new(ImageContentPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSet) DeepCopyInto(out *ImageDigestMirrorSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSet. 
+func (in *ImageDigestMirrorSet) DeepCopy() *ImageDigestMirrorSet { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageDigestMirrorSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSetList) DeepCopyInto(out *ImageDigestMirrorSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageDigestMirrorSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetList. +func (in *ImageDigestMirrorSetList) DeepCopy() *ImageDigestMirrorSetList { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageDigestMirrorSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSetSpec) DeepCopyInto(out *ImageDigestMirrorSetSpec) { + *out = *in + if in.ImageDigestMirrors != nil { + in, out := &in.ImageDigestMirrors, &out.ImageDigestMirrors + *out = make([]ImageDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetSpec. +func (in *ImageDigestMirrorSetSpec) DeepCopy() *ImageDigestMirrorSetSpec { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrorSetStatus) DeepCopyInto(out *ImageDigestMirrorSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetStatus. +func (in *ImageDigestMirrorSetStatus) DeepCopy() *ImageDigestMirrorSetStatus { + if in == nil { + return nil + } + out := new(ImageDigestMirrorSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestMirrors) DeepCopyInto(out *ImageDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]ImageMirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrors. +func (in *ImageDigestMirrors) DeepCopy() *ImageDigestMirrors { + if in == nil { + return nil + } + out := new(ImageDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.RegistrySources.DeepCopyInto(&out.RegistrySources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSet) DeepCopyInto(out *ImageTagMirrorSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSet. +func (in *ImageTagMirrorSet) DeepCopy() *ImageTagMirrorSet { + if in == nil { + return nil + } + out := new(ImageTagMirrorSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageTagMirrorSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetList) DeepCopyInto(out *ImageTagMirrorSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTagMirrorSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetList. +func (in *ImageTagMirrorSetList) DeepCopy() *ImageTagMirrorSetList { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagMirrorSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetSpec) DeepCopyInto(out *ImageTagMirrorSetSpec) { + *out = *in + if in.ImageTagMirrors != nil { + in, out := &in.ImageTagMirrors, &out.ImageTagMirrors + *out = make([]ImageTagMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetSpec. +func (in *ImageTagMirrorSetSpec) DeepCopy() *ImageTagMirrorSetSpec { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetStatus) DeepCopyInto(out *ImageTagMirrorSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetStatus. +func (in *ImageTagMirrorSetStatus) DeepCopy() *ImageTagMirrorSetStatus { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrors) DeepCopyInto(out *ImageTagMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]ImageMirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrors. +func (in *ImageTagMirrors) DeepCopy() *ImageTagMirrors { + if in == nil { + return nil + } + out := new(ImageTagMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. 
+func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. +func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + out.CloudConfig = in.CloudConfig + in.PlatformSpec.DeepCopyInto(&out.PlatformSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + if in.PlatformStatus != nil { + in, out := &in.PlatformStatus, &out.PlatformStatus + *out = new(PlatformStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPlatformSpec) DeepCopyInto(out *IngressPlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSIngressSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPlatformSpec. +func (in *IngressPlatformSpec) DeepCopy() *IngressPlatformSpec { + if in == nil { + return nil + } + out := new(IngressPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteSpec, len(*in)) + copy(*out, *in) + } + if in.RequiredHSTSPolicies != nil { + in, out := &in.RequiredHSTSPolicies, &out.RequiredHSTSPolicies + *out = make([]RequiredHSTSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. 
+func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider. +func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider { + if in == nil { + return nil + } + out := new(KeystoneIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { + *out = *in + out.ConnectionOverrides = in.ConnectionOverrides + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. +func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { + if in == nil { + return nil + } + out := new(KubeClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec. +func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec { + if in == nil { + return nil + } + out := new(KubevirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus. +func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus { + if in == nil { + return nil + } + out := new(KubevirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) { + *out = *in + out.BindPassword = in.BindPassword + out.CA = in.CA + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider. +func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. +func (in *LeaderElection) DeepCopy() *LeaderElection { + if in == nil { + return nil + } + out := new(LeaderElection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { + *out = *in + in.Platform.DeepCopyInto(&out.Platform) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. +func (in *LoadBalancer) DeepCopy() *LoadBalancer { + if in == nil { + return nil + } + out := new(LoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. +func (in *MTUMigration) DeepCopy() *MTUMigration { + if in == nil { + return nil + } + out := new(MTUMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(uint32) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. +func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { + if in == nil { + return nil + } + out := new(MTUMigrationValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaxAgePolicy) DeepCopyInto(out *MaxAgePolicy) { + *out = *in + if in.LargestMaxAge != nil { + in, out := &in.LargestMaxAge, &out.LargestMaxAge + *out = new(int32) + **out = **in + } + if in.SmallestMaxAge != nil { + in, out := &in.SmallestMaxAge, &out.SmallestMaxAge + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxAgePolicy. 
+func (in *MaxAgePolicy) DeepCopy() *MaxAgePolicy { + if in == nil { + return nil + } + out := new(MaxAgePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. +func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnostics) DeepCopyInto(out *NetworkDiagnostics) { + *out = *in + in.SourcePlacement.DeepCopyInto(&out.SourcePlacement) + in.TargetPlacement.DeepCopyInto(&out.TargetPlacement) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnostics. +func (in *NetworkDiagnostics) DeepCopy() *NetworkDiagnostics { + if in == nil { + return nil + } + out := new(NetworkDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopyInto(out *NetworkDiagnosticsSourcePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsSourcePlacement. 
+func (in *NetworkDiagnosticsSourcePlacement) DeepCopy() *NetworkDiagnosticsSourcePlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsSourcePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnosticsTargetPlacement) DeepCopyInto(out *NetworkDiagnosticsTargetPlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsTargetPlacement. +func (in *NetworkDiagnosticsTargetPlacement) DeepCopy() *NetworkDiagnosticsTargetPlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsTargetPlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(MTUMigration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExternalIP != nil { + in, out := &in.ExternalIP, &out.ExternalIP + *out = new(ExternalIPConfig) + (*in).DeepCopyInto(*out) + } + in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. 
+func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. +func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixFailureDomain) DeepCopyInto(out *NutanixFailureDomain) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomain. +func (in *NutanixFailureDomain) DeepCopy() *NutanixFailureDomain { + if in == nil { + return nil + } + out := new(NutanixFailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformLoadBalancer) DeepCopyInto(out *NutanixPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformLoadBalancer. +func (in *NutanixPlatformLoadBalancer) DeepCopy() *NutanixPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(NutanixPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) { + *out = *in + out.PrismCentral = in.PrismCentral + if in.PrismElements != nil { + in, out := &in.PrismElements, &out.PrismElements + *out = make([]NutanixPrismElementEndpoint, len(*in)) + copy(*out, *in) + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]NutanixFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformSpec. +func (in *NutanixPlatformSpec) DeepCopy() *NutanixPlatformSpec { + if in == nil { + return nil + } + out := new(NutanixPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformStatus) DeepCopyInto(out *NutanixPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(NutanixPlatformLoadBalancer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformStatus. 
+func (in *NutanixPlatformStatus) DeepCopy() *NutanixPlatformStatus { + if in == nil { + return nil + } + out := new(NutanixPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismElementEndpoint) DeepCopyInto(out *NutanixPrismElementEndpoint) { + *out = *in + out.Endpoint = in.Endpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismElementEndpoint. +func (in *NutanixPrismElementEndpoint) DeepCopy() *NutanixPrismElementEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismElementEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismEndpoint) DeepCopyInto(out *NutanixPrismEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismEndpoint. +func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) { + *out = *in + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier. +func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { + if in == nil { + return nil + } + out := new(NutanixResourceIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth) DeepCopyInto(out *OAuth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth. +func (in *OAuth) DeepCopy() *OAuth { + if in == nil { + return nil + } + out := new(OAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthList) DeepCopyInto(out *OAuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList. 
+func (in *OAuthList) DeepCopy() *OAuthList { + if in == nil { + return nil + } + out := new(OAuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) { + *out = *in + out.CA = in.CA + out.TLSClientCert = in.TLSClientCert + out.TLSClientKey = in.TLSClientKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo. +func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo { + if in == nil { + return nil + } + out := new(OAuthRemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) { + *out = *in + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + out.Templates = in.Templates + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec. +func (in *OAuthSpec) DeepCopy() *OAuthSpec { + if in == nil { + return nil + } + out := new(OAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus. +func (in *OAuthStatus) DeepCopy() *OAuthStatus { + if in == nil { + return nil + } + out := new(OAuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + out.Login = in.Login + out.ProviderSelection = in.ProviderSelection + out.Error = in.Error + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig. +func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig { + if in == nil { + return nil + } + out := new(OIDCClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCClientReference) DeepCopyInto(out *OIDCClientReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientReference. +func (in *OIDCClientReference) DeepCopy() *OIDCClientReference { + if in == nil { + return nil + } + out := new(OIDCClientReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientStatus) DeepCopyInto(out *OIDCClientStatus) { + *out = *in + if in.CurrentOIDCClients != nil { + in, out := &in.CurrentOIDCClients, &out.CurrentOIDCClients + *out = make([]OIDCClientReference, len(*in)) + copy(*out, *in) + } + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientStatus. +func (in *OIDCClientStatus) DeepCopy() *OIDCClientStatus { + if in == nil { + return nil + } + out := new(OIDCClientStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]TokenClaimValidationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProvider. +func (in *OIDCProvider) DeepCopy() *OIDCProvider { + if in == nil { + return nil + } + out := new(OIDCProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]OpenIDClaim, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformLoadBalancer) DeepCopyInto(out *OpenStackPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformLoadBalancer. +func (in *OpenStackPlatformLoadBalancer) DeepCopy() *OpenStackPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(OpenStackPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec. +func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec { + if in == nil { + return nil + } + out := new(OpenStackPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(OpenStackPlatformLoadBalancer) + **out = **in + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus. +func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus { + if in == nil { + return nil + } + out := new(OpenStackPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperandVersion) DeepCopyInto(out *OperandVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion. +func (in *OperandVersion) DeepCopy() *OperandVersion { + if in == nil { + return nil + } + out := new(OperandVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHub) DeepCopyInto(out *OperatorHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub. +func (in *OperatorHub) DeepCopy() *OperatorHub { + if in == nil { + return nil + } + out := new(OperatorHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList. +func (in *OperatorHubList) DeepCopy() *OperatorHubList { + if in == nil { + return nil + } + out := new(OperatorHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec. +func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec { + if in == nil { + return nil + } + out := new(OperatorHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSourceStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus. +func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { + if in == nil { + return nil + } + out := new(OperatorHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformLoadBalancer) DeepCopyInto(out *OvirtPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformLoadBalancer. +func (in *OvirtPlatformLoadBalancer) DeepCopy() *OvirtPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(OvirtPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec. +func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec { + if in == nil { + return nil + } + out := new(OvirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(OvirtPlatformLoadBalancer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus. +func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { + if in == nil { + return nil + } + out := new(OvirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformSpec) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformSpec) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformSpec) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformSpec) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformSpec) + **out = **in + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(NutanixPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalPlatformSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformStatus) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformStatus) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(NutanixPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalPlatformStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. +func (in *PlatformStatus) DeepCopy() *PlatformStatus { + if in == nil { + return nil + } + out := new(PlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. +func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { + if in == nil { + return nil + } + out := new(PowerVSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus. 
+func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus { + if in == nil { + return nil + } + out := new(PowerVSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint. +func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { + if in == nil { + return nil + } + out := new(PowerVSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping. +func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping { + if in == nil { + return nil + } + out := new(PrefixedClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileCustomizations) DeepCopyInto(out *ProfileCustomizations) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileCustomizations. +func (in *ProfileCustomizations) DeepCopy() *ProfileCustomizations { + if in == nil { + return nil + } + out := new(ProfileCustomizations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + out.ProjectRequestTemplate = in.ProjectRequestTemplate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition. +func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition { + if in == nil { + return nil + } + out := new(PromQLClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Proxy) DeepCopyInto(out *Proxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy. +func (in *Proxy) DeepCopy() *Proxy { + if in == nil { + return nil + } + out := new(Proxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Proxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyList) DeepCopyInto(out *ProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Proxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList. +func (in *ProxyList) DeepCopy() *ProxyList { + if in == nil { + return nil + } + out := new(ProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxySpec) DeepCopyInto(out *ProxySpec) { + *out = *in + if in.ReadinessEndpoints != nil { + in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.TrustedCA = in.TrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec. +func (in *ProxySpec) DeepCopy() *ProxySpec { + if in == nil { + return nil + } + out := new(ProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus. +func (in *ProxyStatus) DeepCopy() *ProxyStatus { + if in == nil { + return nil + } + out := new(ProxyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrySources) DeepCopyInto(out *RegistrySources) { + *out = *in + if in.InsecureRegistries != nil { + in, out := &in.InsecureRegistries, &out.InsecureRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BlockedRegistries != nil { + in, out := &in.BlockedRegistries, &out.BlockedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedRegistries != nil { + in, out := &in.AllowedRegistries, &out.AllowedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerRuntimeSearchRegistries != nil { + in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources. +func (in *RegistrySources) DeepCopy() *RegistrySources { + if in == nil { + return nil + } + out := new(RegistrySources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. +func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. 
+func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]Mirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.ClientCA = in.ClientCA + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredHSTSPolicy) DeepCopyInto(out *RequiredHSTSPolicy) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.DomainPatterns != nil { + in, out := &in.DomainPatterns, &out.DomainPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.MaxAge.DeepCopyInto(&out.MaxAge) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredHSTSPolicy. +func (in *RequiredHSTSPolicy) DeepCopy() *RequiredHSTSPolicy { + if in == nil { + return nil + } + out := new(RequiredHSTSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduler) DeepCopyInto(out *Scheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler. 
+func (in *Scheduler) DeepCopy() *Scheduler { + if in == nil { + return nil + } + out := new(Scheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerList) DeepCopyInto(out *SchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Scheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList. +func (in *SchedulerList) DeepCopy() *SchedulerList { + if in == nil { + return nil + } + out := new(SchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) { + *out = *in + out.Policy = in.Policy + out.ProfileCustomizations = in.ProfileCustomizations + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec. +func (in *SchedulerSpec) DeepCopy() *SchedulerSpec { + if in == nil { + return nil + } + out := new(SchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus. +func (in *SchedulerStatus) DeepCopy() *SchedulerStatus { + if in == nil { + return nil + } + out := new(SchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference. +func (in *SecretNameReference) DeepCopy() *SecretNameReference { + if in == nil { + return nil + } + out := new(SecretNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { + *out = *in + out.CertInfo = in.CertInfo + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]NamedCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. 
+func (in *ServingInfo) DeepCopy() *ServingInfo { + if in == nil { + return nil + } + out := new(ServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureStore) DeepCopyInto(out *SignatureStore) { + *out = *in + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureStore. +func (in *SignatureStore) DeepCopy() *SignatureStore { + if in == nil { + return nil + } + out := new(SignatureStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSource) DeepCopyInto(out *StringSource) { + *out = *in + out.StringSourceSpec = in.StringSourceSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. +func (in *StringSource) DeepCopy() *StringSource { + if in == nil { + return nil + } + out := new(StringSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. +func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { + if in == nil { + return nil + } + out := new(StringSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. +func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { + if in == nil { + return nil + } + out := new(TLSProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { + *out = *in + if in.Old != nil { + in, out := &in.Old, &out.Old + *out = new(OldTLSProfile) + **out = **in + } + if in.Intermediate != nil { + in, out := &in.Intermediate, &out.Intermediate + *out = new(IntermediateTLSProfile) + **out = **in + } + if in.Modern != nil { + in, out := &in.Modern, &out.Modern + *out = new(ModernTLSProfile) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomTLSProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. +func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { + if in == nil { + return nil + } + out := new(TLSSecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateReference) DeepCopyInto(out *TemplateReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference. 
+func (in *TemplateReference) DeepCopy() *TemplateReference { + if in == nil { + return nil + } + out := new(TemplateReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestDetails) DeepCopyInto(out *TestDetails) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestDetails. +func (in *TestDetails) DeepCopy() *TestDetails { + if in == nil { + return nil + } + out := new(TestDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReporting) DeepCopyInto(out *TestReporting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReporting. +func (in *TestReporting) DeepCopy() *TestReporting { + if in == nil { + return nil + } + out := new(TestReporting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingSpec) DeepCopyInto(out *TestReportingSpec) { + *out = *in + if in.TestsForFeatureGates != nil { + in, out := &in.TestsForFeatureGates, &out.TestsForFeatureGates + *out = make([]FeatureGateTests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingSpec. +func (in *TestReportingSpec) DeepCopy() *TestReportingSpec { + if in == nil { + return nil + } + out := new(TestReportingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingStatus) DeepCopyInto(out *TestReportingStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingStatus. +func (in *TestReportingStatus) DeepCopy() *TestReportingStatus { + if in == nil { + return nil + } + out := new(TestReportingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMapping. +func (in *TokenClaimMapping) DeepCopy() *TokenClaimMapping { + if in == nil { + return nil + } + out := new(TokenClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + out.Groups = in.Groups + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings. 
+func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { + if in == nil { + return nil + } + out := new(TokenClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { + *out = *in + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = new(TokenRequiredClaim) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule. +func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeout != nil { + in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]TokenAudience, len(*in)) + copy(*out, *in) + } + out.CertificateAuthority = in.CertificateAuthority + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer. +func (in *TokenIssuer) DeepCopy() *TokenIssuer { + if in == nil { + return nil + } + out := new(TokenIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim. +func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { + if in == nil { + return nil + } + out := new(TokenRequiredClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Update) DeepCopyInto(out *Update) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update. +func (in *Update) DeepCopy() *Update { + if in == nil { + return nil + } + out := new(Update) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) { + *out = *in + in.StartedTime.DeepCopyInto(&out.StartedTime) + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory. 
+func (in *UpdateHistory) DeepCopy() *UpdateHistory { + if in == nil { + return nil + } + out := new(UpdateHistory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(UsernamePrefix) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping. +func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping { + if in == nil { + return nil + } + out := new(UsernameClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernamePrefix) DeepCopyInto(out *UsernamePrefix) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernamePrefix. +func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { + if in == nil { + return nil + } + out := new(UsernamePrefix) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainHostGroup) DeepCopyInto(out *VSphereFailureDomainHostGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainHostGroup. +func (in *VSphereFailureDomainHostGroup) DeepCopy() *VSphereFailureDomainHostGroup { + if in == nil { + return nil + } + out := new(VSphereFailureDomainHostGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainRegionAffinity) DeepCopyInto(out *VSphereFailureDomainRegionAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainRegionAffinity. +func (in *VSphereFailureDomainRegionAffinity) DeepCopy() *VSphereFailureDomainRegionAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainRegionAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainZoneAffinity) DeepCopyInto(out *VSphereFailureDomainZoneAffinity) { + *out = *in + if in.HostGroup != nil { + in, out := &in.HostGroup, &out.HostGroup + *out = new(VSphereFailureDomainHostGroup) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainZoneAffinity. +func (in *VSphereFailureDomainZoneAffinity) DeepCopy() *VSphereFailureDomainZoneAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainZoneAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { + *out = *in + if in.RegionAffinity != nil { + in, out := &in.RegionAffinity, &out.RegionAffinity + *out = new(VSphereFailureDomainRegionAffinity) + **out = **in + } + if in.ZoneAffinity != nil { + in, out := &in.ZoneAffinity, &out.ZoneAffinity + *out = new(VSphereFailureDomainZoneAffinity) + (*in).DeepCopyInto(*out) + } + in.Topology.DeepCopyInto(&out.Topology) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformFailureDomainSpec. +func (in *VSpherePlatformFailureDomainSpec) DeepCopy() *VSpherePlatformFailureDomainSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformFailureDomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformLoadBalancer) DeepCopyInto(out *VSpherePlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformLoadBalancer. +func (in *VSpherePlatformLoadBalancer) DeepCopy() *VSpherePlatformLoadBalancer { + if in == nil { + return nil + } + out := new(VSpherePlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformNodeNetworking) DeepCopyInto(out *VSpherePlatformNodeNetworking) { + *out = *in + in.External.DeepCopyInto(&out.External) + in.Internal.DeepCopyInto(&out.Internal) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworking. +func (in *VSpherePlatformNodeNetworking) DeepCopy() *VSpherePlatformNodeNetworking { + if in == nil { + return nil + } + out := new(VSpherePlatformNodeNetworking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformNodeNetworkingSpec) DeepCopyInto(out *VSpherePlatformNodeNetworkingSpec) { + *out = *in + if in.NetworkSubnetCIDR != nil { + in, out := &in.NetworkSubnetCIDR, &out.NetworkSubnetCIDR + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludeNetworkSubnetCIDR != nil { + in, out := &in.ExcludeNetworkSubnetCIDR, &out.ExcludeNetworkSubnetCIDR + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworkingSpec. +func (in *VSpherePlatformNodeNetworkingSpec) DeepCopy() *VSpherePlatformNodeNetworkingSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformNodeNetworkingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) { + *out = *in + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]VSpherePlatformVCenterSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]VSpherePlatformFailureDomainSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodeNetworking.DeepCopyInto(&out.NodeNetworking) + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]IP, len(*in)) + copy(*out, *in) + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec. +func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) { + *out = *in + if in.APIServerInternalIPs != nil { + in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressIPs != nil { + in, out := &in.IngressIPs, &out.IngressIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(VSpherePlatformLoadBalancer) + **out = **in + } + if in.MachineNetworks != nil { + in, out := &in.MachineNetworks, &out.MachineNetworks + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus. +func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus { + if in == nil { + return nil + } + out := new(VSpherePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformTopology) DeepCopyInto(out *VSpherePlatformTopology) { + *out = *in + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformTopology. +func (in *VSpherePlatformTopology) DeepCopy() *VSpherePlatformTopology { + if in == nil { + return nil + } + out := new(VSpherePlatformTopology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformVCenterSpec) DeepCopyInto(out *VSpherePlatformVCenterSpec) { + *out = *in + if in.Datacenters != nil { + in, out := &in.Datacenters, &out.Datacenters + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformVCenterSpec. 
+func (in *VSpherePlatformVCenterSpec) DeepCopy() *VSpherePlatformVCenterSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformVCenterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. +func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..78fd36f3faa4b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,515 @@ +apiservers.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: apiservers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: APIServer + Labels: {} + PluralName: apiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +authentications.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: authentications.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ExternalOIDC + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Authentication + Labels: {} + PluralName: authentications + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +builds.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: builds.config.openshift.io + Capability: Build + Category: "" + FeatureGates: [] + FilenameOperatorName: openshift-controller-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Build + Labels: {} + PluralName: builds + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +clusteroperators.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/497 + CRDName: clusteroperators.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterOperator + Labels: {} + PluralName: clusteroperators + PrinterColumns: + - description: The version the operator is at. 
+ jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + Scope: Cluster + ShortNames: + - co + TopLevelFeatureGates: [] + Version: v1 + +clusterversions.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/495 + CRDName: clusterversions.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ImageStreamImportMode + - SignatureStores + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterVersion + Labels: {} + PluralName: clusterversions + PrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +consoles.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: consoles.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Console + Labels: {} + PluralName: consoles + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +dnses.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: dnses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: DNS + Labels: {} + PluralName: dnses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +featuregates.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: featuregates.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: FeatureGate + Labels: {} + PluralName: featuregates + PrinterColumns: 
[] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +images.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: images.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ImageStreamImportMode + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Image + Labels: {} + PluralName: images + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagecontentpolicies.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/874 + CRDName: imagecontentpolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageContentPolicy + Labels: {} + PluralName: imagecontentpolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagedigestmirrorsets.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagedigestmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageDigestMirrorSet + Labels: {} + PluralName: imagedigestmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - idms + TopLevelFeatureGates: [] + Version: v1 + +imagetagmirrorsets.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagetagmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageTagMirrorSet + Labels: {} + PluralName: imagetagmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - itms + TopLevelFeatureGates: [] + Version: v1 + +infrastructures.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: infrastructures.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AWSClusterHostedDNS + - BareMetalLoadBalancer + - GCPClusterHostedDNS + - GCPLabelsTags + - HighlyAvailableArbiter + - NutanixMultiSubnets + - VSphereControlPlaneMachineSet + - VSphereHostVMGroupZonal + - VSphereMultiNetworks + - VSphereMultiVCenters + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Infrastructure + Labels: {} + PluralName: infrastructures + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +ingresses.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: ingresses.config.openshift.io + Capability: "" + Category: "" + 
FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Ingress + Labels: {} + PluralName: ingresses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +networks.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: networks.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - NetworkDiagnosticsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: Network + Labels: {} + PluralName: networks + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +nodes.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1107 + CRDName: nodes.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - MinimumKubeletVersion + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Node + Labels: {} + PluralName: nodes + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +oauths.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: oauths.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: OAuth + Labels: {} + PluralName: oauths + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +operatorhubs.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: operatorhubs.config.openshift.io + Capability: marketplace + Category: "" + FeatureGates: [] + FilenameOperatorName: marketplace + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: OperatorHub + Labels: {} + PluralName: operatorhubs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +projects.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: projects.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Project + Labels: {} + PluralName: projects + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +proxies.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: proxies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: Proxy + 
Labels: {} + PluralName: proxies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +schedulers.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: schedulers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - DynamicResourceAllocation + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Scheduler + Labels: {} + PluralName: schedulers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..0ac9c7ccd2be5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,2645 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AdmissionConfig = map[string]string{ + "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.", + "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.", +} + +func (AdmissionConfig) SwaggerDoc() map[string]string { + return map_AdmissionConfig +} + +var map_AdmissionPluginConfig = map[string]string{ + "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", +} + +func (AdmissionPluginConfig) SwaggerDoc() map[string]string { + return map_AdmissionPluginConfig +} + +var map_AuditConfig = map[string]string{ + "": "AuditConfig holds configuration for the audit capabilities", + "enabled": "If this flag is set, the audit log will be printed in the logs. Each log entry contains the method, the user, and the requested URL.", + "auditFilePath": "All requests coming to the apiserver will be logged to this file.", + "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", + "maximumRetainedFiles": "Maximum number of old log files to retain.", + "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. 
Defaults to 100MB.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "logFormat": "Format of saved audits (legacy or json).", + "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", + "webHookMode": "Strategy for sending audit events (block or batch).", +} + +func (AuditConfig) SwaggerDoc() map[string]string { + return map_AuditConfig +} + +var map_CertInfo = map[string]string{ + "": "CertInfo relates a certificate with a private key", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", +} + +func (CertInfo) SwaggerDoc() map[string]string { + return map_CertInfo +} + +var map_ClientConnectionOverrides = map[string]string{ + "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", + "contentType": "contentType is the content type used when sending data to the server from this client.", + "qps": "qps controls the number of queries per second allowed for this connection.", + "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", +} + +func (ClientConnectionOverrides) SwaggerDoc() map[string]string { + return map_ClientConnectionOverrides +} + +var map_ConfigMapFileReference = map[string]string{ + "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "key": "key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", +} + +func (ConfigMapFileReference) SwaggerDoc() map[string]string { + return map_ConfigMapFileReference +} + +var map_ConfigMapNameReference = map[string]string{ + "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced config map", +} + +func (ConfigMapNameReference) SwaggerDoc() map[string]string { + return map_ConfigMapNameReference +} + +var map_DelegatedAuthentication = map[string]string{ + "": "DelegatedAuthentication allows authentication to be disabled.", + "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.", +} + +func (DelegatedAuthentication) SwaggerDoc() map[string]string { + return map_DelegatedAuthentication +} + +var map_DelegatedAuthorization = map[string]string{ + "": "DelegatedAuthorization allows authorization to be disabled.", + "disabled": "disabled indicates that authorization should be disabled. 
By default it will use delegated authorization.", +} + +func (DelegatedAuthorization) SwaggerDoc() map[string]string { + return map_DelegatedAuthorization +} + +var map_EtcdConnectionInfo = map[string]string{ + "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", +} + +func (EtcdConnectionInfo) SwaggerDoc() map[string]string { + return map_EtcdConnectionInfo +} + +var map_EtcdStorageConfig = map[string]string{ + "storagePrefix": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", +} + +func (EtcdStorageConfig) SwaggerDoc() map[string]string { + return map_EtcdStorageConfig +} + +var map_GenericAPIServerConfig = map[string]string{ + "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", + "servingInfo": "servingInfo describes how to start serving", + "corsAllowedOrigins": "corsAllowedOrigins is a list of origins allowed for CORS requests", + "auditConfig": "auditConfig describes how to configure audit information", + "storageConfig": "storageConfig contains information about how to use etcd", + "admission": "admissionConfig holds information about how to configure admission.", +} + +func (GenericAPIServerConfig) SwaggerDoc() map[string]string { + return map_GenericAPIServerConfig +} + +var map_GenericControllerConfig = map[string]string{ + "": "GenericControllerConfig provides information to configure a controller", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", + "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need.", + "authentication": "authentication allows configuration of authentication for the endpoints", + "authorization": "authorization allows configuration of authorization for the endpoints", +} + +func (GenericControllerConfig) SwaggerDoc() map[string]string { + return map_GenericControllerConfig +} + +var map_HTTPServingInfo = map[string]string{ + "": "HTTPServingInfo holds configuration for serving HTTP", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if -1, there is no limit on requests.", +} + +func (HTTPServingInfo) SwaggerDoc() map[string]string { + return map_HTTPServingInfo +} + +var map_KubeClientConfig = map[string]string{ + "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. 
Empty uses an in-cluster-config", + "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.", +} + +func (KubeClientConfig) SwaggerDoc() map[string]string { + return map_KubeClientConfig +} + +var map_LeaderElection = map[string]string{ + "": "LeaderElection provides information to elect a leader", + "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", + "namespace": "namespace indicates which namespace the resource is in", + "name": "name indicates what name to use for the resource", + "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", + "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", + "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.", +} + +func (LeaderElection) SwaggerDoc() map[string]string { + return map_LeaderElection +} + +var map_MaxAgePolicy = map[string]string{ + "": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy", + "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age. This value can be left unspecified, in which case no upper limit is enforced.", + "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age. Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced.", +} + +func (MaxAgePolicy) SwaggerDoc() map[string]string { + return map_MaxAgePolicy +} + +var map_NamedCertificate = map[string]string{ + "": "NamedCertificate specifies a certificate/key, and the names it should be served for", + "names": "names is a list of DNS names this certificate should be used to secure. A name can be a normal DNS name, or can contain leading wildcard segments.", +} + +func (NamedCertificate) SwaggerDoc() map[string]string { + return map_NamedCertificate +} + +var map_RemoteConnectionInfo = map[string]string{ + "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", +} + +func (RemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_RemoteConnectionInfo +} + +var map_RequiredHSTSPolicy = map[string]string{ + "namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.", + "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. 
If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.", + "maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.", + "preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).", + "includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com", +} + +func (RequiredHSTSPolicy) SwaggerDoc() map[string]string { + return map_RequiredHSTSPolicy +} + +var map_SecretNameReference = map[string]string{ + "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced secret", +} + +func (SecretNameReference) SwaggerDoc() map[string]string { + return map_SecretNameReference +} + +var map_ServingInfo = map[string]string{ + "": "ServingInfo holds information about serving web pages", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", +} + +func (ServingInfo) SwaggerDoc() map[string]string { + return map_ServingInfo +} + +var map_StringSource = map[string]string{ + "": "StringSource allows specifying a string inline, or externally via env var or file. 
When it contains only a string value, it marshals to a simple JSON string.", +} + +func (StringSource) SwaggerDoc() map[string]string { + return map_StringSource +} + +var map_StringSourceSpec = map[string]string{ + "": "StringSourceSpec specifies a string value, or external location", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile references a file containing the key to use to decrypt the value.", +} + +func (StringSourceSpec) SwaggerDoc() map[string]string { + return map_StringSourceSpec +} + +var map_APIServer = map[string]string{ + "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (APIServer) SwaggerDoc() map[string]string { + return map_APIServer +} + +var map_APIServerEncryption = map[string]string{ + "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io", +} + +func (APIServerEncryption) SwaggerDoc() map[string]string { + return map_APIServerEncryption +} + +var map_APIServerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (APIServerList) SwaggerDoc() map[string]string { + return map_APIServerList +} + +var map_APIServerNamedServingCert = map[string]string{ + "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.", + "names": "names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. 
+var map_APIServerServingCerts = map[string]string{
+	"namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.",
+}
+
+func (APIServerServingCerts) SwaggerDoc() map[string]string {
+	return map_APIServerServingCerts
+}
+
+var map_APIServerSpec = map[string]string{
+	"servingCerts": "servingCerts is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
+	"clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
+	"additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
+	"encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
+	"tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.",
+	"audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.",
+}
+
+func (APIServerSpec) SwaggerDoc() map[string]string {
+	return map_APIServerSpec
+}
+
+var map_Audit = map[string]string{
+	"profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.",
+	"customRules": "customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom, and the first one that matches applies.",
+}
+
+func (Audit) SwaggerDoc() map[string]string {
+	return map_Audit
+}
+
+var map_AuditCustomRule = map[string]string{
+	"": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.",
+	"group": "group is the name of a group that a request user must be a member of in order for this profile to apply.",
+	"profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.",
+}
+
+func (AuditCustomRule) SwaggerDoc() map[string]string {
+	return map_AuditCustomRule
+}
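+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// keeping the Default top-level profile while turning audit logging off for
+// one hypothetical group via a custom rule; field names are assumed to match
+// the JSON keys documented above.
+var _ = Audit{
+	Profile: "Default",
+	CustomRules: []AuditCustomRule{
+		{Group: "system:serviceaccounts:example", Profile: "None"},
+	},
+}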
+var map_Authentication = map[string]string{
+	"": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Authentication) SwaggerDoc() map[string]string {
+	return map_Authentication
+}
+
+var map_AuthenticationList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (AuthenticationList) SwaggerDoc() map[string]string {
+	return map_AuthenticationList
+}
+
+var map_AuthenticationSpec = map[string]string{
+	"type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
+	"oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
+	"webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.",
+	"webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".",
+	"serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by the previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use the new service account issuer without service disruption.",
+	"oidcProviders": "oidcProviders are OIDC identity providers that can issue tokens for this cluster. Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.",
+}
+
+func (AuthenticationSpec) SwaggerDoc() map[string]string {
+	return map_AuthenticationSpec
+}
+
+var map_AuthenticationStatus = map[string]string{
+	"integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
+	"oidcClients": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.",
+}
+
+func (AuthenticationStatus) SwaggerDoc() map[string]string {
+	return map_AuthenticationStatus
+}
+
+var map_DeprecatedWebhookTokenAuthenticator = map[string]string{
+	"": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. 
It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.", + "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.", +} + +func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_DeprecatedWebhookTokenAuthenticator +} + +var map_OIDCClientConfig = map[string]string{ + "componentName": "componentName is the name of the component that is supposed to consume this client configuration", + "componentNamespace": "componentNamespace is the namespace of the component that is supposed to consume this client configuration", + "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", + "clientSecret": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", + "extraScopes": "extraScopes is an optional set of scopes to request tokens with.", +} + +func (OIDCClientConfig) SwaggerDoc() map[string]string { + return map_OIDCClientConfig +} + +var map_OIDCClientReference = map[string]string{ + "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", + "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", + "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", +} + +func (OIDCClientReference) SwaggerDoc() map[string]string { + return map_OIDCClientReference +} + +var map_OIDCClientStatus = map[string]string{ + "componentName": "componentName is the name of the component that will consume a client configuration.", + "componentNamespace": "componentNamespace is the namespace of the component that will consume a client configuration.", + "currentOIDCClients": "currentOIDCClients is a list of clients that the component is currently using.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. 
If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.",
+}
+
+func (OIDCClientStatus) SwaggerDoc() map[string]string {
+	return map_OIDCClientStatus
+}
+
+var map_OIDCProvider = map[string]string{
+	"name": "name of the OIDC provider",
+	"issuer": "issuer describes attributes of the OIDC token issuer",
+	"oidcClients": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer",
+	"claimMappings": "claimMappings describes rules on how to transform information from an ID token into a cluster identity",
+	"claimValidationRules": "claimValidationRules are rules that are applied to validate token claims to authenticate users.",
+}
+
+func (OIDCProvider) SwaggerDoc() map[string]string {
+	return map_OIDCProvider
+}
+
+var map_PrefixedClaimMapping = map[string]string{
+	"prefix": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of strings \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".",
+}
+
+func (PrefixedClaimMapping) SwaggerDoc() map[string]string {
+	return map_PrefixedClaimMapping
+}
+
+var map_TokenClaimMapping = map[string]string{
+	"claim": "claim is a JWT token claim to be used in the mapping",
+}
+
+func (TokenClaimMapping) SwaggerDoc() map[string]string {
+	return map_TokenClaimMapping
+}
+
+var map_TokenClaimMappings = map[string]string{
+	"username": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"",
+	"groups": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use an array of string values.",
+}
+
+func (TokenClaimMappings) SwaggerDoc() map[string]string {
+	return map_TokenClaimMappings
+}
+
+var map_TokenClaimValidationRule = map[string]string{
+	"type": "type sets the type of the validation rule",
+	"requiredClaim": "requiredClaim allows configuring a required claim name and its expected value",
+}
+
+func (TokenClaimValidationRule) SwaggerDoc() map[string]string {
+	return map_TokenClaimValidationRule
+}
+
+var map_TokenIssuer = map[string]string{
+	"issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.",
+	"audiences": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.",
+	"issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.",
+}
+
+func (TokenIssuer) SwaggerDoc() map[string]string {
+	return map_TokenIssuer
+}
+
+var map_TokenRequiredClaim = map[string]string{
+	"claim": "claim is a name of a required claim. Only claims with string values are supported.",
+	"requiredValue": "requiredValue is the required value for the claim.",
+}
+
+func (TokenRequiredClaim) SwaggerDoc() map[string]string {
+	return map_TokenRequiredClaim
+}
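+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// a validation rule requiring a specific string-valued claim; the claim name
+// and value are placeholders, and the *TokenRequiredClaim pointer shape is an
+// assumption.
+var _ = TokenClaimValidationRule{
+	Type:          "RequiredClaim",
+	RequiredClaim: &TokenRequiredClaim{Claim: "hd", RequiredValue: "example.com"},
+}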
+var map_UsernameClaimMapping = map[string]string{
+	"prefixPolicy": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"",
+}
+
+func (UsernameClaimMapping) SwaggerDoc() map[string]string {
+	return map_UsernameClaimMapping
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+	"": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator",
+	"kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.",
+}
+
+func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+	return map_WebhookTokenAuthenticator
+}
+
+var map_Build = map[string]string{
+	"": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user-settable values for the build controller configuration",
+}
+
+func (Build) SwaggerDoc() map[string]string {
+	return map_Build
+}
+
+var map_BuildDefaults = map[string]string{
+	"defaultProxy": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
+	"gitProxy": "gitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
+	"env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+	"imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. Users can override a default label by providing a label with the same name in their Build/BuildConfig.",
+	"resources": "resources defines resource requirements to execute the build.",
+}
+
+func (BuildDefaults) SwaggerDoc() map[string]string {
+	return map_BuildDefaults
+}
+
+var map_BuildList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (BuildList) SwaggerDoc() map[string]string {
+	return map_BuildList
+}
+
+var map_BuildOverrides = map[string]string{
+	"imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. If a user provides a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+	"nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node",
+	"tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+	"forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself",
+}
+
+func (BuildOverrides) SwaggerDoc() map[string]string {
+	return map_BuildOverrides
+}
+
+var map_BuildSpec = map[string]string{
+	"additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
+	"buildDefaults": "buildDefaults controls the default information for Builds",
+	"buildOverrides": "buildOverrides controls override settings for builds",
+}
+
+func (BuildSpec) SwaggerDoc() map[string]string {
+	return map_BuildSpec
+}
+
+var map_ImageLabel = map[string]string{
+	"name": "name defines the name of the label. It must have non-zero length.",
+	"value": "value defines the literal value of the label.",
+}
+
+func (ImageLabel) SwaggerDoc() map[string]string {
+	return map_ImageLabel
+}
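+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// a default label that build defaults or overrides could apply to resulting
+// images; the name and value are placeholders.
+var _ = ImageLabel{Name: "io.example.vendor", Value: "Example Corp"}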
+var map_ClusterOperator = map[string]string{
+	"": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds configuration that could apply to any operator.",
+	"status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
+}
+
+func (ClusterOperator) SwaggerDoc() map[string]string {
+	return map_ClusterOperator
+}
+
+var map_ClusterOperatorList = map[string]string{
+	"": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ClusterOperatorList) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorList
+}
+
+var map_ClusterOperatorSpec = map[string]string{
+	"": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".",
+}
+
+func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorSpec
+}
+
+var map_ClusterOperatorStatus = map[string]string{
+	"": "ClusterOperatorStatus provides information about the status of the operator.",
+	"conditions": "conditions describes the state of the operator's managed and monitored components.",
+	"versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
+	"relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
+	"extension": "extension contains any additional status information specific to the operator which owns this status object.",
+}
+
+func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorStatus
+}
+
+var map_ClusterOperatorStatusCondition = map[string]string{
+	"": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
+	"type": "type specifies the aspect reported by this condition.",
+	"status": "status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
+	"reason": "reason is the CamelCase reason for the condition's current status.",
+	"message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.",
+}
+
+func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorStatusCondition
+}
+
+var map_ObjectReference = map[string]string{
+	"": "ObjectReference contains enough information to let you inspect or modify the referred object.",
+	"group": "group of the referent.",
+	"resource": "resource of the referent.",
+	"namespace": "namespace of the referent.",
+	"name": "name of the referent.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+	return map_ObjectReference
+}
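+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// a relatedObjects-style reference to a namespaced resource; all values are
+// placeholders.
+var _ = ObjectReference{Group: "apps", Resource: "deployments", Namespace: "openshift-console", Name: "console"}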
+var map_OperandVersion = map[string]string{
+	"name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
+	"version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0.",
+}
+
+func (OperandVersion) SwaggerDoc() map[string]string {
+	return map_OperandVersion
+}
+
+var map_ClusterCondition = map[string]string{
+	"": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.",
+	"type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.",
+	"promql": "promql represents a cluster condition based on PromQL.",
+}
+
+func (ClusterCondition) SwaggerDoc() map[string]string {
+	return map_ClusterCondition
+}
+
+var map_ClusterVersion = map[string]string{
+	"": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.",
+	"status": "status contains information about the available updates and any in-progress updates.",
+}
+
+func (ClusterVersion) SwaggerDoc() map[string]string {
+	return map_ClusterVersion
+}
+
+var map_ClusterVersionCapabilitiesSpec = map[string]string{
+	"": "ClusterVersionCapabilitiesSpec selects the managed set of optional, core cluster components.",
+	"baselineCapabilitySet": "baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent.",
+	"additionalEnabledCapabilities": "additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set.",
+}
+
+func (ClusterVersionCapabilitiesSpec) SwaggerDoc() map[string]string {
+	return map_ClusterVersionCapabilitiesSpec
+}
+
+var map_ClusterVersionCapabilitiesStatus = map[string]string{
+	"": "ClusterVersionCapabilitiesStatus describes the state of optional, core cluster components.",
+	"enabledCapabilities": "enabledCapabilities lists all the capabilities that are currently managed.",
+	"knownCapabilities": "knownCapabilities lists all the capabilities known to the current cluster.",
+}
+
+func (ClusterVersionCapabilitiesStatus) SwaggerDoc() map[string]string {
+	return map_ClusterVersionCapabilitiesStatus
+}
+
+var map_ClusterVersionList = map[string]string{
+	"": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ClusterVersionList) SwaggerDoc() map[string]string {
+	return map_ClusterVersionList
+}
+
+var map_ClusterVersionSpec = map[string]string{
+	"": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.",
+	"clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.",
+	"desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
+	"upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
+	"channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.",
+	"capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.",
+	"signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.",
+	"overrides": "overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
+}
+
+func (ClusterVersionSpec) SwaggerDoc() map[string]string {
+	return map_ClusterVersionSpec
+}
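+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// requesting an update by version alone (case 6 in the desiredUpdate matrix
+// above), so the image is selected from availableUpdates. All values are
+// placeholders, and the *Update pointer shape is an assumption.
+var _ = ClusterVersionSpec{
+	ClusterID:     "00000000-0000-0000-0000-000000000000",
+	Channel:       "stable-4.y",
+	DesiredUpdate: &Update{Version: "4.y.z"},
+}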
+var map_ClusterVersionStatus = map[string]string{
+	"": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
+	"desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
+	"history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
+	"observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
+	"versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
+	"capabilities": "capabilities describes the state of optional, core cluster components.",
+	"conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
+	"availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
+	"conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.",
+}
+
+func (ClusterVersionStatus) SwaggerDoc() map[string]string {
+	return map_ClusterVersionStatus
+}
+
+var map_ComponentOverride = map[string]string{
+	"": "ComponentOverride allows overriding cluster version operator's behavior for a component.",
+	"kind": "kind identifies which object to override.",
+	"group": "group identifies the API group that the kind is in.",
+	"namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.",
+	"name": "name is the component's name.",
+	"unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false",
+}
+
+func (ComponentOverride) SwaggerDoc() map[string]string {
+	return map_ComponentOverride
+}
+
+var map_ConditionalUpdate = map[string]string{
+	"": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.",
+	"release": "release is the target of the update.",
+	"risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.",
+	"conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Recommended, for whether the update is recommended for the current cluster.",
+}
+
+func (ConditionalUpdate) SwaggerDoc() map[string]string {
+	return map_ConditionalUpdate
+}
+
+var map_ConditionalUpdateRisk = map[string]string{
+	"": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.",
+	"url": "url contains information about this risk.",
+	"name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.",
+	"message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.",
+	"matchingRules": "matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended.",
+}
+
+func (ConditionalUpdateRisk) SwaggerDoc() map[string]string {
+	return map_ConditionalUpdateRisk
+}
+
+var map_PromQLClusterCondition = map[string]string{
+	"": "PromQLClusterCondition represents a cluster condition based on PromQL.",
+	"promql": "promql is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.",
+}
+
+func (PromQLClusterCondition) SwaggerDoc() map[string]string {
+	return map_PromQLClusterCondition
+}
+
+var map_Release = map[string]string{
+	"": "Release represents an OpenShift release image and associated metadata.",
+	"architecture": "architecture is an optional field that indicates the value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. Valid values are 'Multi' and empty.",
+	"version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.",
+	"image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
+	"url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.",
+	"channels": "channels is the set of Cincinnati channels to which the release currently belongs.",
+}
+
+func (Release) SwaggerDoc() map[string]string {
+	return map_Release
+}
+
+var map_SignatureStore = map[string]string{
+	"": "SignatureStore represents the URL of a custom signature store.",
+	"url": "url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty.",
+	"ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config.",
+}
+
+func (SignatureStore) SwaggerDoc() map[string]string {
+	return map_SignatureStore
+}
+
+var map_Update = map[string]string{
+	"": "Update represents an administrator update request.",
+	"architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.",
+	"version": "version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified.",
+	"image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified.",
+	"force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.",
+}
+
+func (Update) SwaggerDoc() map[string]string {
+	return map_Update
+}
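+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// requesting an update by image pull spec, leaving version empty as described
+// above; the digest is an all-zero placeholder.
+var _ = Update{Image: "quay.io/openshift-release-dev/ocp-release@sha256:0000000000000000000000000000000000000000000000000000000000000000"}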
+var map_UpdateHistory = map[string]string{
+	"": "UpdateHistory is a single attempted update to the cluster.",
+	"state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).",
+	"startedTime": "startedTime is the time at which the update was started.",
+	"completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).",
+	"version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
+	"image": "image is a container image location that contains the update. This value is always populated.",
+	"verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.",
+	"acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.",
+}
+
+func (UpdateHistory) SwaggerDoc() map[string]string {
+	return map_UpdateHistory
+}
+
+var map_Console = map[string]string{
+	"": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Console) SwaggerDoc() map[string]string {
+	return map_Console
+}
+
+var map_ConsoleAuthentication = map[string]string{
+	"": "ConsoleAuthentication defines a list of optional configuration for console authentication.",
+	"logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.",
+}
+
+func (ConsoleAuthentication) SwaggerDoc() map[string]string {
+	return map_ConsoleAuthentication
+}
+
+var map_ConsoleList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleList) SwaggerDoc() map[string]string { + return map_ConsoleList +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", + "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_AWSDNSSpec = map[string]string{ + "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.", + "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.", +} + +func (AWSDNSSpec) SwaggerDoc() map[string]string { + return map_AWSDNSSpec +} + +var map_DNS = map[string]string{ + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSPlatformSpec = map[string]string{ + "": "DNSPlatformSpec holds cloud-provider-specific configuration for DNS administration.", + "type": "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\".\n\nIndividual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults.", + "aws": "aws contains DNS configuration specific to the Amazon Web Services cloud provider.", +} + +func (DNSPlatformSpec) SwaggerDoc() map[string]string { + return map_DNSPlatformSpec +} + +var map_DNSSpec = map[string]string{ + "baseDomain": "baseDomain is the base domain of the cluster. 
All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
+	"publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
+	"privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.",
+	"platform": "platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.",
+}
+
+func (DNSSpec) SwaggerDoc() map[string]string {
+	return map_DNSSpec
+}
+
+var map_DNSZone = map[string]string{
+	"": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.",
+	"id": "id is the identifier that can be used to find the DNS hosted zone.\n\nOn AWS, the zone can be fetched using `ID` as the id in [1]; on Azure, using `ID` as a pre-determined name in [2]; on GCP, using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get",
+	"tags": "tags can be used to query the DNS hosted zone.\n\nOn AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters.\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options",
+}
+
+func (DNSZone) SwaggerDoc() map[string]string {
+	return map_DNSZone
+}
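+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// identifying a hosted zone either by ID or by tags; values are placeholders.
+var _ = DNSZone{ID: "Z123456789ABCDEFGHIJ"}
+var _ = DNSZone{Tags: map[string]string{"Name": "mycluster-private-zone"}}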
+var map_CustomFeatureGates = map[string]string{
+	"enabled": "enabled is a list of all feature gates that you want to force on",
+	"disabled": "disabled is a list of all feature gates that you want to force off",
+}
+
+func (CustomFeatureGates) SwaggerDoc() map[string]string {
+	return map_CustomFeatureGates
+}
+
+var map_FeatureGate = map[string]string{
+	"": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (FeatureGate) SwaggerDoc() map[string]string {
+	return map_FeatureGate
+}
+
+var map_FeatureGateAttributes = map[string]string{
+	"name": "name is the name of the FeatureGate.",
+}
+
+func (FeatureGateAttributes) SwaggerDoc() map[string]string {
+	return map_FeatureGateAttributes
+}
+
+var map_FeatureGateDetails = map[string]string{
+	"version": "version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.",
+	"enabled": "enabled is a list of all feature gates that are enabled in the cluster for the named version.",
+	"disabled": "disabled is a list of all feature gates that are disabled in the cluster for the named version.",
+}
+
+func (FeatureGateDetails) SwaggerDoc() map[string]string {
+	return map_FeatureGateDetails
+}
+
+var map_FeatureGateList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (FeatureGateList) SwaggerDoc() map[string]string {
+	return map_FeatureGateList
+}
+
+var map_FeatureGateSelection = map[string]string{
+	"featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
+	"customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" to use this field.",
+}
+
+func (FeatureGateSelection) SwaggerDoc() map[string]string {
+	return map_FeatureGateSelection
+}
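+
+// Illustrative sketch only (hand-written, not part of the generated output):
+// force-enabling one hypothetical gate via CustomNoUpgrade; the gate name is
+// a placeholder and the []FeatureGateName element type is an assumption.
+var _ = FeatureGateSelection{
+	FeatureSet:      "CustomNoUpgrade",
+	CustomNoUpgrade: &CustomFeatureGates{Enabled: []FeatureGateName{"ExampleGate"}},
+}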
+var map_FeatureGateStatus = map[string]string{
+	"conditions": "conditions represent the observations of the current state. Known .status.conditions.type are: \"DeterminationDegraded\"",
+	"featureGates": "featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list.",
+}
+
+func (FeatureGateStatus) SwaggerDoc() map[string]string {
+	return map_FeatureGateStatus
+}
+
+var map_Image = map[string]string{
+	"": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Image) SwaggerDoc() map[string]string {
+	return map_Image
+}
+
+var map_ImageList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImageList) SwaggerDoc() map[string]string {
+	return map_ImageList
+}
+
+var map_ImageSpec = map[string]string{
+	"allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+	"externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+	"additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
+	"registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
+	"imageStreamImportMode": "imageStreamImportMode controls the import mode behaviour of imagestreams. It can be set to `Legacy` or `PreserveOriginal` or the empty string. If this value is specified, this setting is applied to all newly created imagestreams which do not have the value set. `Legacy` indicates that the legacy behaviour should be used. For manifest lists, the legacy behaviour will discard the manifest list and import a single sub-manifest. In this case, the platform is chosen in the following order of priority: 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, the manifest list and all its sub-manifests will be imported. When empty, the behaviour will be decided based on the payload type advertised by the ClusterVersion status, i.e. single arch payload implies the import mode is Legacy and multi payload implies PreserveOriginal.",
+}
+
+func (ImageSpec) SwaggerDoc() map[string]string {
+	return map_ImageSpec
+}
When empty, the behaviour will be decided based on the payload type advertised by the ClusterVersion status, i.e. a single-arch payload implies the import mode is Legacy and a multi-arch payload implies PreserveOriginal.", +} + +func (ImageSpec) SwaggerDoc() map[string]string { + return map_ImageSpec +} + +var map_ImageStatus = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in the 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "imageStreamImportMode": "imageStreamImportMode controls the import mode behaviour of imagestreams. It can be `Legacy` or `PreserveOriginal`. `Legacy` indicates that the legacy behaviour should be used. For manifest lists, the legacy behaviour will discard the manifest list and import a single sub-manifest. In this case, the platform is chosen in the following order of priority: 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, the manifest list and all its sub-manifests will be imported. This value will be reconciled based on the spec value or, if no spec value is specified, the image registry operator will look at the ClusterVersion status to determine the payload type and set the import mode accordingly, i.e. a single-arch payload implies the import mode is Legacy and a multi-arch payload implies PreserveOriginal.", +} + +func (ImageStatus) SwaggerDoc() map[string]string { + return map_ImageStatus +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + "domainName": "domainName specifies a domain name for the registry. In case the registry uses a non-standard port (i.e. not 80 or 443), the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RegistrySources = map[string]string{ + "": "RegistrySources holds cluster-wide information about how to handle the registries config.", + "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.", + "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs.
Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e. CRI-O; it will NOT work with builds or imagestream imports.", +} + +func (RegistrySources) SwaggerDoc() map[string]string { + return map_RegistrySources +} + +var map_ImageContentPolicy = map[string]string{ + "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", +} + +func (ImageContentPolicy) SwaggerDoc() map[string]string { + return map_ImageContentPolicy +} + +var map_ImageContentPolicyList = map[string]string{ + "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageContentPolicyList) SwaggerDoc() map[string]string { + return map_ImageContentPolicyList +} + +var map_ImageContentPolicySpec = map[string]string{ + "": "ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.", + "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull images from mirrors by tag, set \"allowMirrorByTags\".\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", +} + +func (ImageContentPolicySpec) SwaggerDoc() map[string]string { + return map_ImageContentPolicySpec +} + +var map_RepositoryDigestMirrors = map[string]string{ + "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source is the repository that users refer to, e.g. in image pull specifications.", + "allowMirrorByTags": "allowMirrorByTags, if true, allows the mirrors to be used to pull images that are referenced by their tags. The default is false; the mirrors only work when pulling images that are referenced by their digests.
Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.", + "mirrors": "mirrors is zero or more repositories that may also contain the same images. If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", +} + +func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { + return map_RepositoryDigestMirrors +} + +var map_ImageDigestMirrorSet = map[string]string{ + "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImageDigestMirrorSet) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSet +} + +var map_ImageDigestMirrorSetList = map[string]string{ + "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageDigestMirrorSetList) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSetList +} + +var map_ImageDigestMirrorSetSpec = map[string]string{ + "": "ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD.", + "imageDigestMirrors": "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using \"ImageTagMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. 
Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors should configure them into one list of mirrors using the expected order.", +} + +func (ImageDigestMirrorSetSpec) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSetSpec +} + +var map_ImageDigestMirrors = map[string]string{ + "": "ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. If unset, the image will continue to be pulled from the repository in the pull spec.
mirrorSourcePolicy is a valid configuration only when one or more mirrors are in the mirror list.", +} + +func (ImageDigestMirrors) SwaggerDoc() map[string]string { + return map_ImageDigestMirrors +} + +var map_ImageTagMirrorSet = map[string]string{ + "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImageTagMirrorSet) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSet +} + +var map_ImageTagMirrorSetList = map[string]string{ + "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImageTagMirrorSetList) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSetList +} + +var map_ImageTagMirrorSetSpec = map[string]string{ + "": "ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.", + "imageTagMirrors": "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
Users who want to use a deterministic order of mirrors should configure them into one list of mirrors using the expected order.", +} + +func (ImageTagMirrorSetSpec) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSetSpec +} + +var map_ImageTagMirrors = map[string]string{ + "": "ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname, e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using \"ImageDigestMirrorSet\" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo. For more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails. If unset, the image will continue to be pulled from the repository in the pull spec. mirrorSourcePolicy is a valid configuration only when one or more mirrors are in the mirror list.", +} + +func (ImageTagMirrors) SwaggerDoc() map[string]string { + return map_ImageTagMirrors +} + +var map_AWSPlatformSpec = map[string]string{ + "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override the default service endpoints of AWS services.
There must be only one ServiceEndpoint for a service.", +} + +func (AWSPlatformSpec) SwaggerDoc() map[string]string { + return map_AWSPlatformSpec +} + +var map_AWSPlatformStatus = map[string]string{ + "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", + "region": "region holds the default AWS region for new AWS resources created by the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override the default service endpoints of AWS services. There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", +} + +func (AWSPlatformStatus) SwaggerDoc() map[string]string { + return map_AWSPlatformStatus +} + +var map_AWSResourceTag = map[string]string{ + "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", + "key": "key is the key of the tag", + "value": "value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", +} + +func (AWSResourceTag) SwaggerDoc() map[string]string { + return map_AWSResourceTag +} + +var map_AWSServiceEndpoint = map[string]string{ + "": "AWSServiceEndpoint stores the configuration of a custom URL to override existing defaults of AWS services.", + "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html. This must be provided and cannot be empty.", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (AWSServiceEndpoint) SwaggerDoc() map[string]string { + return map_AWSServiceEndpoint +} + +var map_AlibabaCloudPlatformSpec = map[string]string{ + "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider.
This only includes fields that can be modified in the cluster.", +} + +func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformSpec +} + +var map_AlibabaCloudPlatformStatus = map[string]string{ + "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.", + "region": "region specifies the region for Alibaba Cloud resources created for the cluster.", + "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.", + "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.", +} + +func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformStatus +} + +var map_AlibabaCloudResourceTag = map[string]string{ + "": "AlibabaCloudResourceTag is the set of tags to apply to resources.", + "key": "key is the key of the tag.", + "value": "value is the value of the tag.", +} + +func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string { + return map_AlibabaCloudResourceTag +} + +var map_AzurePlatformSpec = map[string]string{ + "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AzurePlatformSpec) SwaggerDoc() map[string]string { + return map_AzurePlatformSpec +} + +var map_AzurePlatformStatus = map[string]string{ + "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", + "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is the same as ResourceGroupName.", + "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", + "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", + "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", +} + +func (AzurePlatformStatus) SwaggerDoc() map[string]string { + return map_AzurePlatformStatus +} + +var map_AzureResourceTag = map[string]string{ + "": "AzureResourceTag is a tag to apply to Azure resources created for the cluster.", + "key": "key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`.", + "value": "value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ?
@`.", +} + +func (AzureResourceTag) SwaggerDoc() map[string]string { + return map_AzureResourceTag +} + +var map_BareMetalPlatformLoadBalancer = map[string]string{ + "": "BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform.", + "type": "type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (BareMetalPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_BareMetalPlatformLoadBalancer +} + +var map_BareMetalPlatformSpec = map[string]string{ + "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", +} + +func (BareMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_BareMetalPlatformSpec +} + +var map_BareMetalPlatformStatus = map[string]string{ + "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. 
It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", +} + +func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_BareMetalPlatformStatus +} + +var map_CloudControllerManagerStatus = map[string]string{ + "": "CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings", + "state": "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager\n\nValid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", +} + +func (CloudControllerManagerStatus) SwaggerDoc() map[string]string { + return map_CloudControllerManagerStatus +} + +var map_CloudLoadBalancerConfig = map[string]string{ + "": "CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's Load Balancer configuration needs to be provided so that the DNS solution hosted within the cluster can be configured with those values.", + "dnsType": "dnsType indicates the type of DNS solution in use within the cluster. Its default value of `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time.
Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution.", + "clusterHosted": "clusterHosted holds the IP addresses of API, API-Int and Ingress Load Balancers on Cloud Platforms. The DNS solution hosted within the cluster uses these IP addresses to provide resolution for API, API-Int and Ingress services.", +} + +func (CloudLoadBalancerConfig) SwaggerDoc() map[string]string { + return map_CloudLoadBalancerConfig +} + +var map_CloudLoadBalancerIPs = map[string]string{ + "": "CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API, API-Int and Ingress Load balancers. They will be populated as soon as the respective Load Balancers have been configured. These values are utilized to configure the DNS solution hosted within the cluster.", + "apiIntLoadBalancerIPs": "apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the apiIntLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", + "apiLoadBalancerIPs": "apiLoadBalancerIPs holds Load Balancer IPs for the API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Could be empty for private clusters. Entries in the apiLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", + "ingressLoadBalancerIPs": "ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the ingressLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.", +} + +func (CloudLoadBalancerIPs) SwaggerDoc() map[string]string { + return map_CloudLoadBalancerIPs +} + +var map_EquinixMetalPlatformSpec = map[string]string{ + "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformSpec +} + +var map_EquinixMetalPlatformStatus = map[string]string{ + "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformStatus +} + +var map_ExternalPlatformSpec = map[string]string{ + "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", + "platformName": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at installation time.
This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", +} + +func (ExternalPlatformSpec) SwaggerDoc() map[string]string { + return map_ExternalPlatformSpec +} + +var map_ExternalPlatformStatus = map[string]string{ + "": "ExternalPlatformStatus holds the current status of the generic External infrastructure provider.", + "cloudControllerManager": "cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", +} + +func (ExternalPlatformStatus) SwaggerDoc() map[string]string { + return map_ExternalPlatformStatus +} + +var map_GCPPlatformSpec = map[string]string{ + "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (GCPPlatformSpec) SwaggerDoc() map[string]string { + return map_GCPPlatformSpec +} + +var map_GCPPlatformStatus = map[string]string{ + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "projectID is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", + "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", + "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", +} + +func (GCPPlatformStatus) SwaggerDoc() map[string]string { + return map_GCPPlatformStatus +} + +var map_GCPResourceLabel = map[string]string{ + "": "GCPResourceLabel is a label to apply to GCP resources created for the cluster.", + "key": "key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`.", + "value": "value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty.
Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.", +} + +func (GCPResourceLabel) SwaggerDoc() map[string]string { + return map_GCPResourceLabel +} + +var map_GCPResourceTag = map[string]string{ + "": "GCPResourceTag is a tag to apply to GCP resources created for the cluster.", + "parentID": "parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.", + "key": "key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`.", + "value": "value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.", +} + +func (GCPResourceTag) SwaggerDoc() map[string]string { + return map_GCPResourceTag +} + +var map_IBMCloudPlatformSpec = map[string]string{ + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformSpec +} + +var map_IBMCloudPlatformStatus = map[string]string{ + "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", + "location": "location is where the cluster has been deployed", + "resourceGroupName": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "providerType indicates the type of cluster that was created", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.", +} + +func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformStatus +} + +var map_IBMCloudServiceEndpoint = map[string]string{ + "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", + "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. 
For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`, whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`.", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (IBMCloudServiceEndpoint) SwaggerDoc() map[string]string { + return map_IBMCloudServiceEndpoint +} + +var map_Infrastructure = map[string]string{ + "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Infrastructure) SwaggerDoc() map[string]string { + return map_Infrastructure +} + +var map_InfrastructureList = map[string]string{ + "": "InfrastructureList is a list of Infrastructure resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InfrastructureList) SwaggerDoc() map[string]string { + return map_InfrastructureList +} + +var map_InfrastructureSpec = map[string]string{ + "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.", + "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config stored in the `cloud.conf` key. All the clients are expected to use the generated ConfigMap only.", + "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.", +} + +func (InfrastructureSpec) SwaggerDoc() map[string]string { + return map_InfrastructureSpec +} + +var map_InfrastructureStatus = map[string]string{ + "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", + "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed.
Must be of max length 27 and must have only alphanumeric or hyphen characters.", + "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", + "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", + "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery\n\nDeprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", + "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", + "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.", + "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be set up with CPU partitioning. The \"AllNodes\" value indicates that all nodes have been set up with CPU partitioning, and can then be further configured via the PerformanceProfile API.", +} + +func (InfrastructureStatus) SwaggerDoc() map[string]string { + return map_InfrastructureStatus +} + +var map_KubevirtPlatformSpec = map[string]string{ + "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
This only includes fields that can be modified in the cluster.", +} + +func (KubevirtPlatformSpec) SwaggerDoc() map[string]string { + return map_KubevirtPlatformSpec +} + +var map_KubevirtPlatformStatus = map[string]string{ + "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { + return map_KubevirtPlatformStatus +} + +var map_NutanixFailureDomain = map[string]string{ + "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.", + "name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.", + "cluster": "cluster identifies the cluster (the Prism Element under management of the Prism Central) in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", +} + +func (NutanixFailureDomain) SwaggerDoc() map[string]string { + return map_NutanixFailureDomain +} + +var map_NutanixPlatformLoadBalancer = map[string]string{ + "": "NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.", + "type": "type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (NutanixPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_NutanixPlatformLoadBalancer +} + +var map_NutanixPlatformSpec = map[string]string{ + "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", + "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central.
When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", + "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", + "failureDomains": "failureDomains configures failure domain information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster.", +} + +func (NutanixPlatformSpec) SwaggerDoc() map[string]string { + return map_NutanixPlatformSpec +} + +var map_NutanixPlatformStatus = map[string]string{ + "": "NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", +} + +func (NutanixPlatformStatus) SwaggerDoc() map[string]string { + return map_NutanixPlatformStatus +} + +var map_NutanixPrismElementEndpoint = map[string]string{ + "": "NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)", + "name": "name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (e.g. Machines, PVCs, etc.).", + "endpoint": "endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", +} + +func (NutanixPrismElementEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismElementEndpoint +} + +var map_NutanixPrismEndpoint = map[string]string{ + "": "NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)", + "address": "address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)", + "port": "port is the port number to access the Nutanix Prism Central or Element (cluster)", +} + +func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismEndpoint +} + +var map_NutanixResourceIdentifier = map[string]string{ + "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", + "type": "type is the identifier type to use for this resource.", + "uuid": "uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.", + "name": "name is the resource name in the PC. It cannot be empty if the type is Name.", +} + +func (NutanixResourceIdentifier) SwaggerDoc() map[string]string { + return map_NutanixResourceIdentifier +} + +var map_OpenStackPlatformLoadBalancer = map[string]string{ + "": "OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.", + "type": "type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (OpenStackPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_OpenStackPlatformLoadBalancer +} + +var map_OpenStackPlatformSpec = map[string]string{ + "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. 
Once set, the list cannot be completely removed (but its second entry can).", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", +} + +func (OpenStackPlatformSpec) SwaggerDoc() map[string]string { + return map_OpenStackPlatformSpec +} + +var map_OpenStackPlatformStatus = map[string]string{ + "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", +} + +func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { + return map_OpenStackPlatformStatus +} + +var map_OvirtPlatformLoadBalancer = map[string]string{ + "": "OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.", + "type": "type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. 
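// A small sketch, standard library only, of validating machineNetworks-style
// entries, which the doc above says are CIDRs that should be IPv4 or IPv6,
// such as "10.0.0.0/8" or "fd00::/8".
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	for _, cidr := range []string{"10.0.0.0/8", "fd00::/8", "not-a-cidr"} {
		p, err := netip.ParsePrefix(cidr)
		if err != nil {
			fmt.Printf("%s: invalid CIDR: %v\n", cidr, err)
			continue
		}
		fmt.Printf("%s: ok (IPv4=%v)\n", p, p.Addr().Is4())
	}
}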
The default value is OpenShiftManagedDefault.", +} + +func (OvirtPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_OvirtPlatformLoadBalancer +} + +var map_OvirtPlatformSpec = map[string]string{ + "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OvirtPlatformSpec) SwaggerDoc() map[string]string { + return map_OvirtPlatformSpec +} + +var map_OvirtPlatformStatus = map[string]string{ + "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", +} + +func (OvirtPlatformStatus) SwaggerDoc() map[string]string { + return map_OvirtPlatformStatus +} + +var map_PlatformSpec = map[string]string{ + "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.", +} + +func (PlatformSpec) SwaggerDoc() map[string]string { + return map_PlatformSpec +} + +var map_PlatformStatus = map[string]string{ + "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`. 
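// A hedged sketch of how a component might honor the PlatformStatus contract
// described above: switch on type and treat unrecognized values as None. The
// trimmed-down structs here are illustrative, not the real API types.
package main

import "fmt"

type PlatformType string

const (
	AWSPlatformType  PlatformType = "AWS"
	NonePlatformType PlatformType = "None"
)

type awsStatus struct{ Region string }

type platformStatus struct {
	Type PlatformType
	AWS  *awsStatus // only the struct matching Type is expected to be set
}

func handle(s platformStatus) {
	switch s.Type {
	case AWSPlatformType:
		fmt.Println("AWS region:", s.AWS.Region)
	default:
		// Unrecognized platforms must be handled as None.
		fmt.Println("no infrastructure automation enabled")
	}
}

func main() {
	handle(platformStatus{Type: AWSPlatformType, AWS: &awsStatus{Region: "us-east-1"}})
	handle(platformStatus{Type: "SomeFuturePlatform"})
}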
Currently this value cannot be changed once set.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "external contains settings specific to the generic External infrastructure provider.", +} + +func (PlatformStatus) SwaggerDoc() map[string]string { + return map_PlatformStatus +} + +var map_PowerVSPlatformSpec = map[string]string{ + "": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", +} + +func (PowerVSPlatformSpec) SwaggerDoc() map[string]string { + return map_PowerVSPlatformSpec +} + +var map_PowerVSPlatformStatus = map[string]string{ + "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.", + "region": "region holds the default Power VS region for new Power VS resources created by the cluster.", + "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", + "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. 
When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", +} + +func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { + return map_PowerVSPlatformStatus +} + +var map_PowerVSServiceEndpoint = map[string]string{ + "": "PowerVSServiceEndpoint stores the configuration of a custom URL to override existing defaults of PowerVS Services.", + "name": "name is the name of the Power VS service. A few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { + return map_PowerVSServiceEndpoint +} + +var map_VSphereFailureDomainHostGroup = map[string]string{ + "": "VSphereFailureDomainHostGroup holds the vmGroup and hostGroup names in vCenter, which correspond to vm-host groups of type Virtual Machine and Host respectively. It also contains the vmHostRule, which is an affinity vm-host rule in vCenter.", + "vmGroup": "vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. vmGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "hostGroup": "hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. hostGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "vmHostRule": "vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. vmHostRule is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", +} + +func (VSphereFailureDomainHostGroup) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainHostGroup +} + +var map_VSphereFailureDomainRegionAffinity = map[string]string{ + "": "VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.", + "type": "type determines the vSphere object type for a region within this failure domain. Available types are Datacenter and ComputeCluster. When set to Datacenter, this means the vCenter Datacenter defined is the region. When set to ComputeCluster, this means the vCenter cluster defined is the region.", +} + +func (VSphereFailureDomainRegionAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainRegionAffinity +} + +var map_VSphereFailureDomainZoneAffinity = map[string]string{ + "": "VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) and the vm-host affinity rule that together create an affinity configuration for vm-host based zones. 
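// A sketch of the validation implied by the PowerVSServiceEndpoint doc above:
// the override url must be a non-empty, fully qualified https URI. This is
// illustrative only, not the operator's actual validation code.
package main

import (
	"errors"
	"fmt"
	"net/url"
)

func validateEndpointURL(raw string) error {
	if raw == "" {
		return errors.New("url must be provided and cannot be empty")
	}
	u, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if u.Scheme != "https" || u.Host == "" {
		return fmt.Errorf("url %q must be a fully qualified https URI", raw)
	}
	return nil
}

func main() {
	fmt.Println(validateEndpointURL("https://iam.test.cloud.ibm.com")) // nil
	fmt.Println(validateEndpointURL("http://insecure.example.com"))    // error
}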
This configuration within vCenter creates the required association between a failure domain, virtual machines and ESXi hosts to create a vm-host based zone.", + "type": "type determines the vSphere object type for a zone within this failure domain. Available types are ComputeCluster and HostGroup. When set to ComputeCluster, this means the vCenter cluster defined is the zone. When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and this means the zone is defined by the grouping of those fields.", + "hostGroup": "hostGroup holds the vmGroup and hostGroup names in vCenter, which correspond to vm-host groups of type Virtual Machine and Host respectively. It also contains the vmHostRule, which is an affinity vm-host rule in vCenter.", +} + +func (VSphereFailureDomainZoneAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainZoneAffinity +} + +var map_VSpherePlatformFailureDomainSpec = map[string]string{ + "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", + "name": "name defines the arbitrary but unique name of a failure domain.", + "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.", + "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", + "regionAffinity": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.", + "zoneAffinity": "zoneAffinity holds the type of the zone and the hostGroup, whose vmGroup and hostGroup names in vCenter correspond to vm-host groups of type Virtual Machine and Host respectively. It also contains the vmHostRule, which is an affinity vm-host rule in vCenter.", + "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", + "topology": "topology describes a given failure domain using vSphere constructs", +} + +func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformFailureDomainSpec +} + +var map_VSpherePlatformLoadBalancer = map[string]string{ + "": "VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.", + "type": "type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. 
The default value is OpenShiftManagedDefault.", +} + +func (VSpherePlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_VSpherePlatformLoadBalancer +} + +var map_VSpherePlatformNodeNetworking = map[string]string{ + "": "VSpherePlatformNodeNetworking holds the external and internal node networking spec.", + "external": "external represents the network configuration of the node that is externally routable.", + "internal": "internal represents the network configuration of the node that is routable only within the cluster.", +} + +func (VSpherePlatformNodeNetworking) SwaggerDoc() map[string]string { + return map_VSpherePlatformNodeNetworking +} + +var map_VSpherePlatformNodeNetworkingSpec = map[string]string{ + "": "VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for including and excluding IP ranges in the cloud provider. This would be used for example when multiple network adapters are attached to a guest to help determine which IP address the cloud config manager should use for the external and internal node networking.", + "networkSubnetCidr": "networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields.", + "network": "network VirtualMachine's VM Network names that will be used when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'`", + "excludeNetworkSubnetCidr": "excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields.", +} + +func (VSpherePlatformNodeNetworkingSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformNodeNetworkingSpec +} + +var map_VSpherePlatformSpec = map[string]string{ + "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. In the future the cloud provider operator, storage operator and machine operator will use these fields for configuration.", + "vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported, but in tech preview 3 vCenters are supported. Once the cluster has been installed, you are unable to change the current number of defined vCenters except in the case where the cluster has been upgraded from a version of OpenShift where the vsphere platform spec was not present. You may make modifications to the existing vCenters that are defined in the vcenters list in order to match with any added or modified failure domains.", + "failureDomains": "failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used.", + "nodeNetworking": "nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. 
These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".", +} + +func (VSpherePlatformSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformSpec +} + +var map_VSpherePlatformStatus = map[string]string{ + "": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.", + "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", + "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", +} + +func (VSpherePlatformStatus) SwaggerDoc() map[string]string { + return map_VSpherePlatformStatus +} + +var map_VSpherePlatformTopology = map[string]string{ + "": "VSpherePlatformTopology holds the required and optional vCenter objects - datacenter, computeCluster, networks, datastore and resourcePool - to provision virtual machines.", + "datacenter": "datacenter is the name of the vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters.", + "computeCluster": "computeCluster is the absolute path of the vCenter cluster in which virtual machines will be located. The absolute path is of the form /<datacenter>/host/<cluster>. The maximum length of the path is 2048 characters.", + "networks": "networks is the list of port group network names within this failure domain. If feature gate VSphereMultiNetworks is enabled, up to 10 network adapters may be defined. 10 is the maximum number of virtual network devices which may be attached to a VM as defined by: https://configmax.esp.vmware.com/guest?vmwareproduct=vSphere&release=vSphere%208.0&categories=1-0 The available networks (port groups) can be listed using `govc ls 'network/*'` Networks should be in the form of an absolute path: /<datacenter>/network/<portgroup>.", + "datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form /<datacenter>/datastore/<datastore> The maximum length of the path is 2048 characters.", + "resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. The maximum length of the path is 2048 characters.", + "folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form /<datacenter>/vm/<folder>. The maximum length of the path is 2048 characters.", + "template": "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.\n\nWhen omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.", +} + +func (VSpherePlatformTopology) SwaggerDoc() map[string]string { + return map_VSpherePlatformTopology +} + +var map_VSpherePlatformVCenterSpec = map[string]string{ + "": "VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM.", + "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", + "port": "port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time.", + "datacenters": "The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. 
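// The topology fields above use vSphere inventory paths of the form
// /<datacenter>/host/<cluster>, /<datacenter>/datastore/<datastore>, and so
// on. A tiny sketch of composing such paths; the names are made up.
package main

import "fmt"

func main() {
	dc, cluster, datastore, pool := "dc1", "cluster1", "ds1", "pool1"
	fmt.Println("computeCluster:", fmt.Sprintf("/%s/host/%s", dc, cluster))
	fmt.Println("datastore:     ", fmt.Sprintf("/%s/datastore/%s", dc, datastore))
	fmt.Println("resourcePool:  ", fmt.Sprintf("/%s/host/%s/Resources/%s", dc, cluster, pool))
}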
Each datacenter listed here should be used within a topology.", +} + +func (VSpherePlatformVCenterSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformVCenterSpec +} + +var map_AWSIngressSpec = map[string]string{ + "": "AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", + "type": "type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", +} + +func (AWSIngressSpec) SwaggerDoc() map[string]string { + return map_AWSIngressSpec +} + +var map_ComponentRouteSpec = map[string]string{ + "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "hostname": "hostname is the hostname that should be used by the route.", + "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ComponentRouteSpec) SwaggerDoc() map[string]string { + return map_ComponentRouteSpec +} + +var map_ComponentRouteStatus = map[string]string{ + "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize. 
It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "defaultHostname": "defaultHostname is the hostname of this route prior to customization.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.", + "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.", + "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.", + "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.", +} + +func (ComponentRouteStatus) SwaggerDoc() map[string]string { + return map_ComponentRouteStatus +} + +var map_Ingress = map[string]string{ + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressPlatformSpec = map[string]string{ + "": "IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", +} + +func (IngressPlatformSpec) SwaggerDoc() map[string]string { + return map_IngressPlatformSpec +} + +var map_IngressSpec = map[string]string{ + "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.", + "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate.", + "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.", + "requiredHSTSPolicies": "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission.\n\nA candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains\n\n- For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation.\n\nThe HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. 
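// A rough sketch of inspecting the HSTS route annotation mentioned above
// ("haproxy.router.openshift.io/hsts_header"), extracting max-age the way a
// policy check might; illustrative, not the router's real parser.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func maxAge(header string) (int, bool) {
	for _, directive := range strings.Split(header, ";") {
		k, v, found := strings.Cut(strings.TrimSpace(directive), "=")
		if found && strings.EqualFold(k, "max-age") {
			n, err := strconv.Atoi(v)
			return n, err == nil
		}
	}
	return 0, false
}

func main() {
	fmt.Println(maxAge("max-age=31536000;preload;includeSubDomains")) // 31536000 true
}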
However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.\n\nNote that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.", + "loadBalancer": "loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", + "defaultPlacement": "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes.\n\nThis field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments.\n\nSee the documentation for the IngressController replicas and nodePlacement fields for more information.\n\nWhen omitted, the default value is Workers", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_LoadBalancer = map[string]string{ + "platform": "platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.", +} + +func (LoadBalancer) SwaggerDoc() map[string]string { + return map_LoadBalancer +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.", + "cidr": "The complete block for pod IPs.", + "hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ExternalIPConfig = map[string]string{ + "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.", + "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.", + "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.", +} + +func (ExternalIPConfig) SwaggerDoc() map[string]string { + return map_ExternalIPConfig +} + +var map_ExternalIPPolicy = map[string]string{ + "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.", + "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.", + "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. 
These take precedence over allowedCIDRs.", +} + +func (ExternalIPPolicy) SwaggerDoc() map[string]string { + return map_ExternalIPPolicy +} + +var map_MTUMigration = map[string]string{ + "": "MTUMigration contains information about MTU migration.", + "network": "network contains MTU migration configuration for the default network.", + "machine": "machine contains MTU migration configuration for the machine's uplink.", +} + +func (MTUMigration) SwaggerDoc() map[string]string { + return map_MTUMigration +} + +var map_MTUMigrationValues = map[string]string{ + "": "MTUMigrationValues contains the values for an MTU migration.", + "to": "to is the MTU to migrate to.", + "from": "from is the MTU to migrate from.", +} + +func (MTUMigrationValues) SwaggerDoc() map[string]string { + return map_MTUMigrationValues +} + +var map_Network = map[string]string{ + "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkDiagnostics = map[string]string{ + "mode": "mode controls the network diagnostics mode\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is All.", + "sourcePlacement": "sourcePlacement controls the scheduling of network diagnostics source deployment\n\nSee NetworkDiagnosticsSourcePlacement for more details about default values.", + "targetPlacement": "targetPlacement controls the scheduling of network diagnostics target daemonset\n\nSee NetworkDiagnosticsTargetPlacement for more details about default values.", +} + +func (NetworkDiagnostics) SwaggerDoc() map[string]string { + return map_NetworkDiagnostics +} + +var map_NetworkDiagnosticsSourcePlacement = map[string]string{ + "": "NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components", + "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.", + "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
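// A sketch of the precedence rule documented for ExternalIPPolicy above:
// rejectedCIDRs win over allowedCIDRs, and an empty policy permits nothing.
// The permitted helper is hypothetical, standard library only.
package main

import (
	"fmt"
	"net/netip"
)

func permitted(ip netip.Addr, allowed, rejected []netip.Prefix) bool {
	for _, p := range rejected {
		if p.Contains(ip) {
			return false // rejectedCIDRs take precedence
		}
	}
	for _, p := range allowed {
		if p.Contains(ip) {
			return true
		}
	}
	return false // the zero policy struct permits no ExternalIPs
}

func main() {
	allowed := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}
	rejected := []netip.Prefix{netip.MustParsePrefix("192.0.2.128/25")}
	fmt.Println(permitted(netip.MustParseAddr("192.0.2.1"), allowed, rejected))   // true
	fmt.Println(permitted(netip.MustParseAddr("192.0.2.200"), allowed, rejected)) // false
}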
The current default is an empty list.", +} + +func (NetworkDiagnosticsSourcePlacement) SwaggerDoc() map[string]string { + return map_NetworkDiagnosticsSourcePlacement +} + +var map_NetworkDiagnosticsTargetPlacement = map[string]string{ + "": "NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components", + "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.", + "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `- operator: \"Exists\"` which means that all taints are tolerated.", +} + +func (NetworkDiagnosticsTargetPlacement) SwaggerDoc() map[string]string { + return map_NetworkDiagnosticsTargetPlacement +} + +var map_NetworkList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the network migration status.", + "networkType": "networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", + "mtu": "mtu is the MTU configuration that is being deployed.", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", + "networkType": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", + "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", + "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. 
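// A sketch of parsing a serviceNodePortRange value like the default
// "30000-32767" noted above; purely illustrative.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseRange(s string) (lo, hi int, err error) {
	a, b, found := strings.Cut(s, "-")
	if !found {
		return 0, 0, fmt.Errorf("range %q must be of the form lo-hi", s)
	}
	if lo, err = strconv.Atoi(a); err != nil {
		return 0, 0, err
	}
	if hi, err = strconv.Atoi(b); err != nil {
		return 0, 0, err
	}
	if lo < 1 || hi > 65535 || lo > hi {
		return 0, 0, fmt.Errorf("range %q out of bounds", s)
	}
	return lo, hi, nil
}

func main() { fmt.Println(parseRange("30000-32767")) }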
This parameter can be updated after the cluster is installed.", + "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is the current network configuration.", + "clusterNetwork": "IP address pool to use for pod IPs.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", + "networkType": "networkType is the plugin that is deployed (e.g. OVNKubernetes).", + "clusterNetworkMTU": "clusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "migration contains the cluster network migration configuration.", + "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +} + +var map_Node = map[string]string{ + "": "Node holds cluster-wide information about node-specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values.", +} + +func (Node) SwaggerDoc() map[string]string { + return map_Node +} + +var map_NodeList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (NodeList) SwaggerDoc() map[string]string { + return map_NodeList +} + +var map_NodeSpec = map[string]string{ + "cgroupMode": "cgroupMode determines the cgroups version on the node", + "workerLatencyProfile": "workerLatencyProfile determines how fast the kubelet updates its status, and the corresponding reaction of the cluster", + "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the Kubernetes release, not OpenShift, this field references the underlying Kubernetes version this version of OpenShift is based on. In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then they should set the minimumKubeletVersion to 1.30.0. 
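// A sketch of the comparison described for minimumKubeletVersion: strip
// anything past major.minor.patch (e.g. the "-ec.0" pre-release), then compare
// numerically. Hand-rolled to stay dependency-free; not the apiserver's code.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// compare returns -1, 0, or 1 for a < b, a == b, a > b.
func compare(a, b string) int {
	pa, pb := parts(a), parts(b)
	for i := 0; i < 3; i++ {
		if pa[i] != pb[i] {
			if pa[i] < pb[i] {
				return -1
			}
			return 1
		}
	}
	return 0
}

func parts(v string) [3]int {
	v, _, _ = strings.Cut(v, "-") // drop pre-release content such as "-ec.0"
	var out [3]int
	for i, f := range strings.SplitN(v, ".", 3) {
		out[i], _ = strconv.Atoi(f)
	}
	return out
}

func main() {
	fmt.Println(compare("1.0.0-ec.0", "1.0.0")) // 0: compatible with "1.0.0" or earlier
	fmt.Println(compare("1.29.5", "1.30.0"))    // -1: older, would be denied
}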
When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", +} + +func (NodeSpec) SwaggerDoc() map[string]string { + return map_NodeSpec +} + +var map_NodeStatus = map[string]string{ + "conditions": "conditions contain the details and the current state of the nodes.config object", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_BasicAuthIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", +} + +func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthIdentityProvider +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "url": "url is the oauth server base URL", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. 
The namespace for this config map is openshift-config.", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_HTPasswdIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials", + "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdIdentityProvider +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users Defaults to \"claim\"", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_IdentityProviderConfig = map[string]string{ + "": "IdentityProviderConfig contains configuration for using a specific identity provider", + "type": "type identifies the identity provider type for this entry.", + "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP", + "github": "github enables user authentication using GitHub credentials", + "gitlab": "gitlab enables user authentication using GitLab credentials", + "google": "google enables user authentication using Google credentials", + "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials", + "keystone": "keystone enables user authentication using keystone password credentials", + "ldap": "ldap enables user authentication using LDAP credentials", + "openID": "openID enables user authentication using OpenID credentials", + "requestHeader": "requestHeader enables user authentication using request header credentials", +} + +func (IdentityProviderConfig) SwaggerDoc() map[string]string { + return map_IdentityProviderConfig +} + +var map_KeystoneIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials", + "domainName": 
"domainName is required for keystone v3", +} + +func (KeystoneIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystoneIdentityProvider +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "insecure": "insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLS connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "attributes": "attributes maps LDAP attributes to identities", +} + +func (LDAPIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPIdentityProvider +} + +var map_OAuth = map[string]string{ + "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OAuth) SwaggerDoc() map[string]string { + return map_OAuth +} + +var map_OAuthList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OAuthList) SwaggerDoc() map[string]string { + return map_OAuthList +} + +var map_OAuthRemoteConnectionInfo = map[string]string{ + "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "url is the remote URL to connect to", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", + "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_OAuthRemoteConnectionInfo +} + +var map_OAuthSpec = map[string]string{ + "": "OAuthSpec contains desired cluster auth configuration", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", +} + +func (OAuthSpec) SwaggerDoc() map[string]string { + return map_OAuthSpec +} + +var map_OAuthStatus = map[string]string{ + "": "OAuthStatus shows current known state of OAuth server in the cluster", +} + +func (OAuthStatus) SwaggerDoc() map[string]string { + return map_OAuthStatus +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. 
If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.", + "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.", + "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "groups": "groups is the list of claims whose values should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used.
The namespace for this config map is openshift-config.", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.", + "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. 
Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid for their entire lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +} + +var map_HubSource = map[string]string{ + "": "HubSource is used to specify the hub source and its configuration", + "name": "name is the name of one of the default hub sources", + "disabled": "disabled is used to disable a default hub source on the cluster", +} + +func (HubSource) SwaggerDoc() map[string]string { + return map_HubSource +} + +var map_HubSourceStatus = map[string]string{ + "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", + "status": "status indicates success or failure in applying the configuration", + "message": "message provides more information regarding failures", +} + +func (HubSourceStatus) SwaggerDoc() map[string]string { + return map_HubSourceStatus +} + +var map_OperatorHub = map[string]string{ + "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OperatorHub) SwaggerDoc() map[string]string { + return map_OperatorHub +} + +var map_OperatorHubList = map[string]string{ + "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OperatorHubList) SwaggerDoc() map[string]string { + return map_OperatorHubList +} + +var map_OperatorHubSpec = map[string]string{ + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence.
The list of default hub sources and their current state will always be reflected in the status block.", +} + +func (OperatorHubSpec) SwaggerDoc() map[string]string { + return map_OperatorHubSpec +} + +var map_OperatorHubStatus = map[string]string{ + "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", + "sources": "sources encapsulates the result of applying the configuration for each hub source", +} + +func (OperatorHubStatus) SwaggerDoc() map[string]string { + return map_OperatorHubStatus +} + +var map_Project = map[string]string{ + "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec holds the project creation configuration.", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_TemplateReference = map[string]string{ + "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced project request template", +} + +func (TemplateReference) SwaggerDoc() map[string]string { + return map_TemplateReference +} + +var map_Proxy = map[string]string{ + "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user-settable values for the proxy configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (Proxy) SwaggerDoc() map[string]string { + return map_Proxy +} + +var map_ProxyList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ProxyList) SwaggerDoc() map[string]string { + return map_ProxyList +} + +var map_ProxySpec = map[string]string{ + "": "ProxySpec contains cluster proxy creation configuration.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.", + "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", +} + +func (ProxySpec) SwaggerDoc() map[string]string { + return map_ProxySpec +} + +var map_ProxyStatus = map[string]string{ + "": "ProxyStatus shows current known state of the cluster proxy.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.", +} + +func (ProxyStatus) SwaggerDoc() map[string]string { + return map_ProxyStatus +} + +var map_ProfileCustomizations = map[string]string{ + "": "ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles", + "dynamicResourceAllocation": "dynamicResourceAllocation allows enabling or disabling dynamic resource allocation within the scheduler. Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod. Third-party resource drivers are responsible for tracking and allocating resources. Different kinds of resources support arbitrary parameters for defining requirements and initialization. Valid values are Enabled, Disabled and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
The current default is Disabled.", +} + +func (ProfileCustomizations) SwaggerDoc() map[string]string { + return map_ProfileCustomizations +} + +var map_Scheduler = map[string]string{ + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Scheduler) SwaggerDoc() map[string]string { + return map_Scheduler +} + +var map_SchedulerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (SchedulerList) SwaggerDoc() map[string]string { + return map_SchedulerList +} + +var map_SchedulerSpec = map[string]string{ + "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available, the scheduler will default to using DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "profile": "profile sets which scheduling profile should be used to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", + "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.", + "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", + "mastersSchedulable": "mastersSchedulable allows master nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them.
The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", +} + +func (SchedulerSpec) SwaggerDoc() map[string]string { + return map_SchedulerSpec +} + +var map_FeatureGateTests = map[string]string{ + "featureGate": "featureGate is the name of the FeatureGate as it appears in the FeatureGate CR instance.", + "tests": "tests contains an item for every TestName", +} + +func (FeatureGateTests) SwaggerDoc() map[string]string { + return map_FeatureGateTests +} + +var map_TestDetails = map[string]string{ + "testName": "testName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", +} + +func (TestDetails) SwaggerDoc() map[string]string { + return map_TestDetails +} + +var map_TestReporting = map[string]string{ + "": "TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into the payload for later analysis on a per-payload basis. This doesn't need any CRD because it's never stored in the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (TestReporting) SwaggerDoc() map[string]string { + return map_TestReporting +} + +var map_TestReportingSpec = map[string]string{ + "testsForFeatureGates": "testsForFeatureGates is a list, indexed by FeatureGate, that includes information about testing.", +} + +func (TestReportingSpec) SwaggerDoc() map[string]string { + return map_TestReportingSpec +} + +var map_CustomTLSProfile = map[string]string{ + "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", +} + +func (CustomTLSProfile) SwaggerDoc() map[string]string { + return map_CustomTLSProfile +} + +var map_IntermediateTLSProfile = map[string]string{ + "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", +} + +func (IntermediateTLSProfile) SwaggerDoc() map[string]string { + return map_IntermediateTLSProfile +} + +var map_ModernTLSProfile = map[string]string{ + "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", +} + +func (ModernTLSProfile) SwaggerDoc() map[string]string { + return map_ModernTLSProfile +} + +var map_OldTLSProfile = map[string]string{ + "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", +} + +func (OldTLSProfile) SwaggerDoc() map[string]string { + return map_OldTLSProfile +} + +var map_TLSProfileSpec = map[string]string{ + "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake.
Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", +} + +func (TLSProfileSpec) SwaggerDoc() map[string]string { + return map_TLSProfileSpec +} + +var map_TLSSecurityProfile = map[string]string{ + "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", + "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n - DHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-ECDSA-AES128-SHA256\n\n - ECDHE-RSA-AES128-SHA256\n\n - ECDHE-ECDSA-AES128-SHA\n\n - ECDHE-RSA-AES128-SHA\n\n - ECDHE-ECDSA-AES256-SHA384\n\n - ECDHE-RSA-AES256-SHA384\n\n - ECDHE-ECDSA-AES256-SHA\n\n - ECDHE-RSA-AES256-SHA\n\n - DHE-RSA-AES128-SHA256\n\n - DHE-RSA-AES256-SHA256\n\n - AES128-GCM-SHA256\n\n - AES256-GCM-SHA384\n\n - AES128-SHA256\n\n - AES256-SHA256\n\n - AES128-SHA\n\n - AES256-SHA\n\n - DES-CBC3-SHA\n\n minTLSVersion: VersionTLS10", + "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n minTLSVersion: VersionTLS12", + "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n minTLSVersion: VersionTLS13", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this:\n\n ciphers:\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n minTLSVersion: VersionTLS11", +} + +func (TLSSecurityProfile) SwaggerDoc() map[string]string { + return map_TLSSecurityProfile +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/config/v1alpha1/Makefile b/vendor/github.com/openshift/api/config/v1alpha1/Makefile new file mode 100644 index 0000000000000..e32ad5d9e19a8 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/config/v1alpha1/doc.go b/vendor/github.com/openshift/api/config/v1alpha1/doc.go new file mode 100644 index 0000000000000..20d448573955f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=config.openshift.io +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go new file mode 100644 index 0000000000000..36432ceb802e6 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go @@ -0,0 +1,44 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &InsightsDataGather{}, + &InsightsDataGatherList{}, + &Backup{}, + &BackupList{}, + &ImagePolicy{}, + &ImagePolicyList{}, + &ClusterImagePolicy{}, + &ClusterImagePolicyList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go new file mode 100644 index 0000000000000..e52a2e5c5362e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -0,0 +1,163 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// Backup provides configuration for performing backups of the openshift cluster. 
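+//
+// A hypothetical manifest for illustration only; the metadata name and all
+// field values below are assumptions, not defaults (see the individual field
+// documentation for the actual semantics):
+//
+//	apiVersion: config.openshift.io/v1alpha1
+//	kind: Backup
+//	metadata:
+//	  name: default
+//	spec:
+//	  etcd:
+//	    schedule: "0 */2 * * *"
+//	    timeZone: "UTC"
+//	    retentionPolicy:
+//	      retentionType: RetentionNumber
+//	      retentionNumber:
+//	        maxNumberOfBackups: 15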
+// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=backups,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1482 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=AutomatedEtcdBackup +// +openshift:compatibility-gen:level=4 +type Backup struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec BackupSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status BackupStatus `json:"status"` +} + +type BackupSpec struct { + // etcd specifies the configuration for periodic backups of the etcd cluster + // +required + EtcdBackupSpec EtcdBackupSpec `json:"etcd"` +} + +type BackupStatus struct { +} + +// EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator +type EtcdBackupSpec struct { + + // schedule defines the recurring backup schedule in Cron format + // every 2 hours: 0 */2 * * * + // every day at 3am: 0 3 * * * + // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. + // The current default is "no backups", but will change in the future. + // +optional + // +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$` + Schedule string `json:"schedule"` + + // Cron Regex breakdown: + // Allow macros: (@(annually|yearly|monthly|weekly|daily|hourly)) + // OR + // Minute: + // (\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) + // Hour: + // (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) + // Day of the Month: + // (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) + // Month: + // (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) + // Day of Week: + // 
(\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*)) + // + + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + // +optional + // +kubebuilder:validation:Pattern:=`^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$` + TimeZone string `json:"timeZone"` + + // Timezone regex breakdown: + // ([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(/[A-Za-z_]+){1,2}) - Matches either: + // [A-Za-z_]+([+-]*0)* - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by a +0 or -0 to account for GMT+0 or GMT-0 (for the first part of the timezone identifier). + // [A-Za-z_]+(/[A-Za-z_]+){1,2} - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by one or two occurrences of a forward slash followed by one or more alphabetical characters or underscores. This allows for matching timezone identifiers with 2 or 3 parts, e.g America/Argentina/Buenos_Aires + // (/GMT[+-]\d{1,2})? - Makes the GMT offset suffix optional. It matches "/GMT" followed by either a plus ("+") or minus ("-") sign and one or two digits (the GMT offset) + + // retentionPolicy defines the retention policy for retaining and deleting existing backups. + // +optional + RetentionPolicy RetentionPolicy `json:"retentionPolicy"` + + // pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + // etcd backup files would be saved + // The PVC itself must always be created in the "openshift-etcd" namespace + // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. + // In the future this would be backups saved across the control-plane master nodes. + // +optional + PVCName string `json:"pvcName"` +} + +// RetentionType is the enumeration of valid retention policy types +// +enum +// +kubebuilder:validation:Enum:="RetentionNumber";"RetentionSize" +type RetentionType string + +const ( + // RetentionTypeNumber sets the retention policy based on the number of backup files saved + RetentionTypeNumber RetentionType = "RetentionNumber" + // RetentionTypeSize sets the retention policy based on the total size of the backup files saved + RetentionTypeSize RetentionType = "RetentionSize" +) + +// RetentionPolicy defines the retention policy for retaining and deleting existing backups. +// This struct is a discriminated union that allows users to select the type of retention policy from the supported types. +// +union +type RetentionPolicy struct { + // retentionType sets the type of retention policy. + // Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. + // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. + // The current default is RetentionNumber with 15 backups kept. 
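+	// As a sketch, a size-based variant might look like this in a manifest
+	// (the 10GB value is an illustrative assumption, not a recommendation):
+	//
+	//	retentionPolicy:
+	//	  retentionType: RetentionSize
+	//	  retentionSize:
+	//	    maxSizeOfBackupsGb: 10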
+ // +unionDiscriminator + // +required + // +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize" + RetentionType RetentionType `json:"retentionType"` + + // retentionNumber configures the retention policy based on the number of backups + // +optional + RetentionNumber *RetentionNumberConfig `json:"retentionNumber,omitempty"` + + // retentionSize configures the retention policy based on the size of backups + // +optional + RetentionSize *RetentionSizeConfig `json:"retentionSize,omitempty"` +} + +// RetentionNumberConfig specifies the configuration of the retention policy on the number of backups +type RetentionNumberConfig struct { + // maxNumberOfBackups defines the maximum number of backups to retain. + // If the existing number of backups saved is equal to MaxNumberOfBackups then + // the oldest backup will be removed before a new backup is initiated. + // +kubebuilder:validation:Minimum=1 + // +required + MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"` +} + +// RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups +type RetentionSizeConfig struct { + // maxSizeOfBackupsGb defines the total size in GB of backups to retain. + // If the current total size of backups exceeds MaxSizeOfBackupsGb then + // the oldest backup will be removed before a new backup is initiated. + // +kubebuilder:validation:Minimum=1 + // +required + MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type BackupList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []Backup `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go new file mode 100644 index 0000000000000..5eaeeea736be7 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -0,0 +1,79 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicy holds cluster-wide configuration for image signature verification +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=4 +type ClusterImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec contains the configuration for the cluster image policy. + // +required + Spec ClusterImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ClusterImagePolicyStatus `json:"status,omitempty"` +} + +// ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource. +type ClusterImagePolicySpec struct { + // scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + // If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated. + // +required + Policy Policy `json:"policy"` +} + +// +k8s:deepcopy-gen=true +type ClusterImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicyList is a list of ClusterImagePolicy resources +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type ClusterImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ClusterImagePolicy `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go new file mode 100644 index 0000000000000..7f57d88f9158a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -0,0 +1,238 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicy holds namespace-wide configuration for image signature verification +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagepolicies,scope=Namespaced +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=4 +type ImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec ImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImagePolicyStatus `json:"status,omitempty"` +} + +// ImagePolicySpec is the specification of the ImagePolicy CRD. +type ImagePolicySpec struct { + // scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + // If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. 
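+	// For illustration, a scopes list could contain entries such as
+	// "quay.io" (an entire registry host), "quay.io/openshift-release-dev"
+	// (a repository namespace), "docker.io/library/busybox:latest" (a single
+	// tagged image), or "*.example.com" (all subdomains of example.com);
+	// these values are examples, not recommendations.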
+ // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated. + // +required + Policy Policy `json:"policy"` +} + +// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" +// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" +// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope" +// +kubebuilder:validation:MaxLength=512 +type ImageScope string + +// Policy defines the verification policy for the items in the scopes list. +type Policy struct { + // rootOfTrust specifies the root of trust for the policy. + // +required + RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` + // signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". + // +optional + SignedIdentity PolicyIdentity `json:"signedIdentity,omitempty"` +} + +// PolicyRootOfTrust defines the root of trust based on the selected policyType. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" +type PolicyRootOfTrust struct { + // policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. + // "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. 
+ // "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. + // +unionDiscriminator + // +required + PolicyType PolicyType `json:"policyType"` + // publicKey defines the root of trust based on a sigstore public key. + // +optional + PublicKey *PublicKey `json:"publicKey,omitempty"` + // fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. + // For more information about Fulcio and Rekor, please refer to the document at: + // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + // +optional + FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"` +} + +// +kubebuilder:validation:Enum=PublicKey;FulcioCAWithRekor +type PolicyType string + +const ( + PublicKeyRootOfTrust PolicyType = "PublicKey" + FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor" +) + +// PublicKey defines the root of trust based on a sigstore public key. +type PublicKey struct { + // keyData contains inline base64-encoded data for the PEM format public key. + // KeyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + KeyData []byte `json:"keyData"` + // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +optional + // +kubebuilder:validation:MaxLength=8192 + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. +type FulcioCAWithRekor struct { + // fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. + // fulcioCAData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + FulcioCAData []byte `json:"fulcioCAData"` + // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + RekorKeyData []byte `json:"rekorKeyData"` + // fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration. + // +required + FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"` +} + +// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration. +type PolicyFulcioSubject struct { + // oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. + // Example: "https://expected.OIDC.issuer/" + // +required + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL" + OIDCIssuer string `json:"oidcIssuer"` + // signedEmail holds the email address the the Fulcio certificate is issued for. + // Example: "expected-signing-user@example.com" + // +required + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + SignedEmail string `json:"signedEmail"` +} + +// PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact". +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? 
has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" +// +union +type PolicyIdentity struct { + // matchPolicy sets the type of matching to be used. + // Valid values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact". + // If matchPolicy is set to ExactRepository, then exactRepository must be specified. + // If matchPolicy is set to RemapIdentity, then remapIdentity must be specified. + // "MatchRepoDigestOrExact" means that the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. + // "MatchRepository" means that the identity in the signature must be in the same repository as the image identity. + // "ExactRepository" means that the identity in the signature must be in the same repository as a specific identity specified by "repository". + // "RemapIdentity" means that the identity in the signature must be the same as the remapped image identity. The remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the image identity matches the specified prefix. + // +unionDiscriminator + // +required + MatchPolicy IdentityMatchPolicy `json:"matchPolicy"` + // exactRepository is required if matchPolicy is set to "ExactRepository". + // +optional + PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"` + // remapIdentity is required if matchPolicy is set to "RemapIdentity". + // +optional + PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"` +} + +// +kubebuilder:validation:MaxLength=512 +// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest" +// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity" +type IdentityRepositoryPrefix string + +type PolicyMatchExactRepository struct { + // repository is the reference of the image identity to be matched. + // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox + // +required + Repository IdentityRepositoryPrefix `json:"repository"` +} + +type PolicyMatchRemapIdentity struct { + // prefix is the prefix of the image identity to be matched. + // If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). + // This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure.
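+ // As an illustrative sketch only (hypothetical registries), a mirror configuration might be:
+ //
+ //   remapIdentity:
+ //     prefix: mirror.example.com/vendor
+ //     signedPrefix: quay.io/vendor
+ //
+ // With this setting, the pulled image identity mirror.example.com/vendor/app is verified against signatures claiming quay.io/vendor/app.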
+ // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + Prefix IdentityRepositoryPrefix `json:"prefix"` + // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"` +} + +// IdentityMatchPolicy defines the type of matching for "matchPolicy". +// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity +type IdentityMatchPolicy string + +const ( + IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact" + IdentityMatchPolicyMatchRepository IdentityMatchPolicy = "MatchRepository" + IdentityMatchPolicyExactRepository IdentityMatchPolicy = "ExactRepository" + IdentityMatchPolicyRemapIdentity IdentityMatchPolicy = "RemapIdentity" +) + +// +k8s:deepcopy-gen=true +type ImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicyList is a list of ImagePolicy resources +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type ImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ImagePolicy `json:"items"` +} + +const ( + // ImagePolicyPending indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. + ImagePolicyPending = "Pending" + // ImagePolicyApplied indicates that the policy has been applied + ImagePolicyApplied = "Applied" +) diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go new file mode 100644 index 0000000000000..3ae4de157cfd2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -0,0 +1,88 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// InsightsDataGather provides data gather configuration options for the Insights Operator.
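+// A minimal illustrative manifest (a sketch only; the field values below, including the name "cluster", are hypothetical examples, not defaults):
+//
+//   apiVersion: config.openshift.io/v1alpha1
+//   kind: InsightsDataGather
+//   metadata:
+//     name: cluster
+//   spec:
+//     gatherConfig:
+//       dataPolicy: ObfuscateNetworking
+//       disabledGatherers:
+//         - clusterconfig/machine_configs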
+// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1245 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=InsightsConfig +// +openshift:compatibility-gen:level=4 +type InsightsDataGather struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec InsightsDataGatherSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status InsightsDataGatherStatus `json:"status"` +} + +type InsightsDataGatherSpec struct { + // gatherConfig spec attribute includes all the configuration options related to + // gathering of the Insights data and its uploading to the ingress. + // +optional + GatherConfig GatherConfig `json:"gatherConfig,omitempty"` +} + +type InsightsDataGatherStatus struct { +} + +// gatherConfig provides data gathering configuration options. +type GatherConfig struct { + // dataPolicy allows the user to enable additional global obfuscation of the IP addresses and base domain + // in the Insights archive data. Valid values are "None" and "ObfuscateNetworking". + // When set to None the data is not obfuscated. + // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is None. + // +optional + DataPolicy DataPolicy `json:"dataPolicy,omitempty"` + // disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing the "all" value. + // If all the gatherers are disabled, the Insights operator does not gather any data. + // The particular gatherer IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of the last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // An example of disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", "workloads/workload_info"]` + // +optional + DisabledGatherers []string `json:"disabledGatherers"` +} + +const ( + // No data obfuscation + NoPolicy DataPolicy = "None" + // IP addresses and cluster domain name are obfuscated + ObfuscateNetworking DataPolicy = "ObfuscateNetworking" +) + +// dataPolicy declares valid data policy types +// +kubebuilder:validation:Enum="";None;ObfuscateNetworking +type DataPolicy string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InsightsDataGatherList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4 +type InsightsDataGatherList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []InsightsDataGather `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..ab39b5b91544d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,678 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + in.EtcdBackupSpec.DeepCopyInto(&out.EtcdBackupSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. 
+func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy. +func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy { + if in == nil { + return nil + } + out := new(ClusterImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList. +func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList { + if in == nil { + return nil + } + out := new(ClusterImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec. +func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec { + if in == nil { + return nil + } + out := new(ClusterImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus. +func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) { + *out = *in + in.RetentionPolicy.DeepCopyInto(&out.RetentionPolicy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupSpec. +func (in *EtcdBackupSpec) DeepCopy() *EtcdBackupSpec { + if in == nil { + return nil + } + out := new(EtcdBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor. +func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor { + if in == nil { + return nil + } + out := new(FulcioCAWithRekor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { + *out = *in + if in.DisabledGatherers != nil { + in, out := &in.DisabledGatherers, &out.DisabledGatherers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig. +func (in *GatherConfig) DeepCopy() *GatherConfig { + if in == nil { + return nil + } + out := new(GatherConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy. +func (in *ImagePolicy) DeepCopy() *ImagePolicy { + if in == nil { + return nil + } + out := new(ImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList. +func (in *ImagePolicyList) DeepCopy() *ImagePolicyList { + if in == nil { + return nil + } + out := new(ImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec. +func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec { + if in == nil { + return nil + } + out := new(ImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus. +func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { + if in == nil { + return nil + } + out := new(ImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather. +func (in *InsightsDataGather) DeepCopy() *InsightsDataGather { + if in == nil { + return nil + } + out := new(InsightsDataGather) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGather) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsDataGather, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList. +func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList { + if in == nil { + return nil + } + out := new(InsightsDataGatherList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) { + *out = *in + in.GatherConfig.DeepCopyInto(&out.GatherConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec. +func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec { + if in == nil { + return nil + } + out := new(InsightsDataGatherSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherStatus) DeepCopyInto(out *InsightsDataGatherStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherStatus. +func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus { + if in == nil { + return nil + } + out := new(InsightsDataGatherStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + in.SignedIdentity.DeepCopyInto(&out.SignedIdentity) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject. +func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject { + if in == nil { + return nil + } + out := new(PolicyFulcioSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) { + *out = *in + if in.PolicyMatchExactRepository != nil { + in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository + *out = new(PolicyMatchExactRepository) + **out = **in + } + if in.PolicyMatchRemapIdentity != nil { + in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity + *out = new(PolicyMatchRemapIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity. +func (in *PolicyIdentity) DeepCopy() *PolicyIdentity { + if in == nil { + return nil + } + out := new(PolicyIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository. +func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository { + if in == nil { + return nil + } + out := new(PolicyMatchExactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity. +func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity { + if in == nil { + return nil + } + out := new(PolicyMatchRemapIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(PublicKey) + (*in).DeepCopyInto(*out) + } + if in.FulcioCAWithRekor != nil { + in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor + *out = new(FulcioCAWithRekor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust. +func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { + if in == nil { + return nil + } + out := new(PolicyRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicKey) DeepCopyInto(out *PublicKey) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey. +func (in *PublicKey) DeepCopy() *PublicKey { + if in == nil { + return nil + } + out := new(PublicKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionNumberConfig) DeepCopyInto(out *RetentionNumberConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionNumberConfig. +func (in *RetentionNumberConfig) DeepCopy() *RetentionNumberConfig { + if in == nil { + return nil + } + out := new(RetentionNumberConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicy) DeepCopyInto(out *RetentionPolicy) { + *out = *in + if in.RetentionNumber != nil { + in, out := &in.RetentionNumber, &out.RetentionNumber + *out = new(RetentionNumberConfig) + **out = **in + } + if in.RetentionSize != nil { + in, out := &in.RetentionSize, &out.RetentionSize + *out = new(RetentionSizeConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicy. +func (in *RetentionPolicy) DeepCopy() *RetentionPolicy { + if in == nil { + return nil + } + out := new(RetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionSizeConfig) DeepCopyInto(out *RetentionSizeConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionSizeConfig. 
+func (in *RetentionSizeConfig) DeepCopy() *RetentionSizeConfig { + if in == nil { + return nil + } + out := new(RetentionSizeConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..393365b41c327 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,92 @@ +backups.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1482 + CRDName: backups.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AutomatedEtcdBackup + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Backup + Labels: {} + PluralName: backups + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - AutomatedEtcdBackup + Version: v1alpha1 + +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1457 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1alpha1 + +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1457 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1alpha1 + +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1245 + CRDName: insightsdatagathers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1alpha1 + diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..e6accce0d721c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,277 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Backup = map[string]string{ + "": "\n\nBackup provides configuration for performing backups of the openshift cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Backup) SwaggerDoc() map[string]string { + return map_Backup +} + +var map_BackupList = map[string]string{ + "": "BackupList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (BackupList) SwaggerDoc() map[string]string { + return map_BackupList +} + +var map_BackupSpec = map[string]string{ + "etcd": "etcd specifies the configuration for periodic backups of the etcd cluster", +} + +func (BackupSpec) SwaggerDoc() map[string]string { + return map_BackupSpec +} + +var map_EtcdBackupSpec = map[string]string{ + "": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator", + "schedule": "schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", + "retentionPolicy": "retentionPolicy defines the retention policy for retaining and deleting existing backups.", + "pvcName": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. 
In the future this would be backups saved across the control-plane master nodes.", +} + +func (EtcdBackupSpec) SwaggerDoc() map[string]string { + return map_EtcdBackupSpec +} + +var map_RetentionNumberConfig = map[string]string{ + "": "RetentionNumberConfig specifies the configuration of the retention policy on the number of backups", + "maxNumberOfBackups": "maxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", +} + +func (RetentionNumberConfig) SwaggerDoc() map[string]string { + return map_RetentionNumberConfig +} + +var map_RetentionPolicy = map[string]string{ + "": "RetentionPolicy defines the retention policy for retaining and deleting existing backups. This struct is a discriminated union that allows users to select the type of retention policy from the supported types.", + "retentionType": "retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), or by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.", + "retentionNumber": "retentionNumber configures the retention policy based on the number of backups", + "retentionSize": "retentionSize configures the retention policy based on the size of backups", +} + +func (RetentionPolicy) SwaggerDoc() map[string]string { + return map_RetentionPolicy +} + +var map_RetentionSizeConfig = map[string]string{ + "": "RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups", + "maxSizeOfBackupsGb": "maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size of backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", +} + +func (RetentionSizeConfig) SwaggerDoc() map[string]string { + return map_RetentionSizeConfig +} + +var map_ClusterImagePolicy = map[string]string{ + "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the configuration for the cluster image policy.", + "status": "status contains the observed state of the resource.", +} + +func (ClusterImagePolicy) SwaggerDoc() map[string]string { + return map_ClusterImagePolicy +} + +var map_ClusterImagePolicyList = map[string]string{ + "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ClusterImagePolicyList) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyList +} + +var map_ClusterImagePolicySpec = map[string]string{ + "": "ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", + "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ClusterImagePolicySpec) SwaggerDoc() map[string]string { + return map_ClusterImagePolicySpec +} + +var map_ClusterImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyStatus +} + +var map_FulcioCAWithRekor = map[string]string{ + "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters.", + "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key.
rekorKeyData must be at most 8192 characters.", + "fulcioSubject": "fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (FulcioCAWithRekor) SwaggerDoc() map[string]string { + return map_FulcioCAWithRekor +} + +var map_ImagePolicy = map[string]string{ + "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImagePolicy) SwaggerDoc() map[string]string { + return map_ImagePolicy +} + +var map_ImagePolicyList = map[string]string{ + "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImagePolicyList) SwaggerDoc() map[string]string { + return map_ImagePolicyList +} + +var map_ImagePolicySpec = map[string]string{ + "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", + "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ImagePolicySpec) SwaggerDoc() map[string]string { + return map_ImagePolicySpec +} + +var map_ImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ImagePolicyStatus +} + +var map_Policy = map[string]string{ + "": "Policy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust specifies the root of trust for the policy.", + "signedIdentity": "signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_PolicyFulcioSubject = map[string]string{ + "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", + "oidcIssuer": "oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"", + "signedEmail": "signedEmail holds the email address that the Fulcio certificate is issued for. Example: \"expected-signing-user@example.com\"", +} + +func (PolicyFulcioSubject) SwaggerDoc() map[string]string { + return map_PolicyFulcioSubject +} + +var map_PolicyIdentity = map[string]string{ + "": "PolicyIdentity defines the image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".", + "matchPolicy": "matchPolicy sets the type of matching to be used. Valid values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\". If matchPolicy is set to ExactRepository, then exactRepository must be specified. If matchPolicy is set to RemapIdentity, then remapIdentity must be specified. \"MatchRepoDigestOrExact\" means that the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. \"MatchRepository\" means that the identity in the signature must be in the same repository as the image identity. \"ExactRepository\" means that the identity in the signature must be in the same repository as a specific identity specified by \"repository\". \"RemapIdentity\" means that the identity in the signature must be the same as the remapped image identity.
The remapped image identity is obtained by replacing the \"prefix\" with the specified “signedPrefix” if the image identity matches the specified prefix.", + "exactRepository": "exactRepository is required if matchPolicy is set to \"ExactRepository\".", + "remapIdentity": "remapIdentity is required if matchPolicy is set to \"RemapIdentity\".", +} + +func (PolicyIdentity) SwaggerDoc() map[string]string { + return map_PolicyIdentity +} + +var map_PolicyMatchExactRepository = map[string]string{ + "repository": "repository is the reference of the image identity to be matched. The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox", +} + +func (PolicyMatchExactRepository) SwaggerDoc() map[string]string { + return map_PolicyMatchExactRepository +} + +var map_PolicyMatchRemapIdentity = map[string]string{ + "prefix": "prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", + "signedPrefix": "signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", +} + +func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { + return map_PolicyMatchRemapIdentity +} + +var map_PolicyRootOfTrust = map[string]string{ + "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", + "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.", + "publicKey": "publicKey defines the root of trust based on a sigstore public key.", + "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.
For more information about Fulcio and Rekor, please refer to the documents at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", +} + +func (PolicyRootOfTrust) SwaggerDoc() map[string]string { + return map_PolicyRootOfTrust +} + +var map_PublicKey = map[string]string{ + "": "PublicKey defines the root of trust based on a sigstore public key.", + "keyData": "keyData contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters.", + "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format of the Rekor public key. rekorKeyData must be at most 8192 characters.", +} + +func (PublicKey) SwaggerDoc() map[string]string { + return map_PublicKey +} + +var map_GatherConfig = map[string]string{ + "": "gatherConfig provides data gathering configuration options.", + "dataPolicy": "dataPolicy allows the user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is None.", + "disabledGatherers": "disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing the \"all\" value. If all the gatherers are disabled, the Insights operator does not gather any data. The particular gatherer IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of the last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`", +} + +func (GatherConfig) SwaggerDoc() map[string]string { + return map_GatherConfig +} + +var map_InsightsDataGather = map[string]string{ + "": "\n\nInsightsDataGather provides data gather configuration options for the Insights Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (InsightsDataGather) SwaggerDoc() map[string]string { + return map_InsightsDataGather +} + +var map_InsightsDataGatherList = map[string]string{ + "": "InsightsDataGatherList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InsightsDataGatherList) SwaggerDoc() map[string]string { + return map_InsightsDataGatherList +} + +var map_InsightsDataGatherSpec = map[string]string{ + "gatherConfig": "gatherConfig spec attribute includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.", +} + +func (InsightsDataGatherSpec) SwaggerDoc() map[string]string { + return map_InsightsDataGatherSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go new file mode 100644 index 0000000000000..d963a41e572b0 --- /dev/null +++ b/vendor/github.com/openshift/api/features/features.go @@ -0,0 +1,713 @@ +package features + +import ( + "fmt" + + configv1 "github.com/openshift/api/config/v1" +) + +func FeatureSets(clusterProfile ClusterProfileName, featureSet configv1.FeatureSet) (*FeatureGateEnabledDisabled, error) { + byFeatureSet, ok := allFeatureGates[clusterProfile] + if !ok { + return nil, fmt.Errorf("no information found for ClusterProfile=%q", clusterProfile) + } + featureGates, ok := byFeatureSet[featureSet] + if !ok { + return nil, fmt.Errorf("no information found for FeatureSet=%q under ClusterProfile=%q", featureSet, clusterProfile) + } + return featureGates.DeepCopy(), nil +} + +func AllFeatureSets() map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled { + ret := map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled{} + + for clusterProfile, byFeatureSet := range allFeatureGates { + newByFeatureSet := map[configv1.FeatureSet]*FeatureGateEnabledDisabled{} + + for featureSet, enabledDisabled := range byFeatureSet { + newByFeatureSet[featureSet] = enabledDisabled.DeepCopy() + } + ret[clusterProfile] = newByFeatureSet + } + + return ret +} + +var ( + allFeatureGates = map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled{} + + FeatureGateConsolePluginCSP = newFeatureGate("ConsolePluginContentSecurityPolicy"). + reportProblemsToJiraComponent("Management Console"). + contactPerson("jhadvig"). + productScope(ocpSpecific). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1706"). + mustRegister() + + FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). + reportProblemsToJiraComponent("apiserver-auth"). + contactPerson("stlaz"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4193"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("benluddy"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3488"). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateGatewayAPI = newFeatureGate("GatewayAPI"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + + FeatureGateSetEIPForNLBIngressController = newFeatureGate("SetEIPForNLBIngressController"). 
+ reportProblemsToJiraComponent("Networking / router"). + contactPerson("miheer"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateOpenShiftPodSecurityAdmission = newFeatureGate("OpenShiftPodSecurityAdmission"). + reportProblemsToJiraComponent("auth"). + contactPerson("ibihim"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/899"). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes"). + reportProblemsToJiraComponent("builds"). + contactPerson("adkaplan"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNodeSwap = newFeatureGate("NodeSwap"). + reportProblemsToJiraComponent("node"). + contactPerson("ehashman"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/2400"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack"). + reportProblemsToJiraComponent("openstack"). + contactPerson("egarcia"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI"). + reportProblemsToJiraComponent("insights"). + contactPerson("tremes"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInsightsRuntimeExtractor = newFeatureGate("InsightsRuntimeExtractor"). + reportProblemsToJiraComponent("insights"). + contactPerson("jmesnil"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation"). + reportProblemsToJiraComponent("scheduling"). + contactPerson("jchaloup"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4381"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAzureWorkloadIdentity = newFeatureGate("AzureWorkloadIdentity"). + reportProblemsToJiraComponent("cloud-credential-operator"). + contactPerson("abutcher"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet"). + reportProblemsToJiraComponent("apps"). + contactPerson("atiratree"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/961"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateEventedPLEG = newFeatureGate("EventedPLEG"). + reportProblemsToJiraComponent("node"). + contactPerson("sairameshv"). + productScope(kubernetes). 
+ enhancementPR("https://github.com/kubernetes/enhancements/issues/3386"). + mustRegister() + + FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification"). + reportProblemsToJiraComponent("node"). + contactPerson("sgrunert"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags"). + reportProblemsToJiraComponent("Installer"). + contactPerson("bhb"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs"). + reportProblemsToJiraComponent("machine-config-operator/platform-baremetal"). + contactPerson("mkowalsk"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3705"). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal"). + reportProblemsToJiraComponent("splat"). + contactPerson("jcpowermac"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1677"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereMultiVCenters = newFeatureGate("VSphereMultiVCenters"). + reportProblemsToJiraComponent("splat"). + contactPerson("vr4manta"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs"). + reportProblemsToJiraComponent("splat"). + contactPerson("rvanderp3"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate"). + reportProblemsToJiraComponent("router"). + contactPerson("thejasn"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateCPMSMachineNamePrefix = newFeatureGate("CPMSMachineNamePrefix"). + reportProblemsToJiraComponent("Cloud Compute / ControlPlaneMachineSet"). + contactPerson("chiragkyal"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1714"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() + + FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("tssurya"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNetworkSegmentation = newFeatureGate("NetworkSegmentation"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("tssurya"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAdditionalRoutingCapabilities = newFeatureGate("AdditionalRoutingCapabilities"). + reportProblemsToJiraComponent("Networking/cluster-network-operator"). + contactPerson("jcaamano"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateRouteAdvertisements = newFeatureGate("RouteAdvertisements"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("jcaamano"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("pliu"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). + reportProblemsToJiraComponent("Networking/cluster-network-operator"). + contactPerson("kyrtapz"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateOVNObservability = newFeatureGate("OVNObservability"). + reportProblemsToJiraComponent("Networking"). + contactPerson("npinaeva"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed"). + reportProblemsToJiraComponent("etcd"). + contactPerson("hasbro17"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBackendQuotaGiB = newFeatureGate("EtcdBackendQuota"). + reportProblemsToJiraComponent("etcd"). + contactPerson("hasbro17"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAutomatedEtcdBackup = newFeatureGate("AutomatedEtcdBackup"). + reportProblemsToJiraComponent("etcd"). + contactPerson("hasbro17"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() + + FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = newFeatureGate("MachineAPIOperatorDisableMachineHealthCheckController"). + reportProblemsToJiraComponent("ecoproject"). + contactPerson("msluiter"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + mustRegister() + + FeatureGateDNSNameResolver = newFeatureGate("DNSNameResolver"). + reportProblemsToJiraComponent("dns"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereControlPlaneMachineset = newFeatureGate("VSphereControlPlaneMachineSet"). + reportProblemsToJiraComponent("splat"). + contactPerson("rvanderp3"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("cdoern"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall"). + reportProblemsToJiraComponent("Installer"). + contactPerson("vincepri"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + mustRegister() + + FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). + reportProblemsToJiraComponent("Installer"). + contactPerson("barbacbd"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAWSClusterHostedDNS = newFeatureGate("AWSClusterHostedDNS"). + reportProblemsToJiraComponent("Installer"). + contactPerson("barbacbd"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation"). + reportProblemsToJiraComponent("NodeTuningOperator"). + contactPerson("titzhak"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateManagedBootImages = newFeatureGate("ManagedBootImages"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateManagedBootImagesAWS = newFeatureGate("ManagedBootImagesAWS"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(kubernetes). 
+ enhancementPR("https://github.com/kubernetes/enhancements/issues/2395"). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("dkhater"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBootcNodeManagement = newFeatureGate("BootcNodeManagement"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("inesqyx"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateSignatureStores = newFeatureGate("SignatureStores"). + reportProblemsToJiraComponent("Cluster Version Operator"). + contactPerson("lmohanty"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateKMSv1 = newFeatureGate("KMSv1"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("dgrisonnet"). + productScope(kubernetes). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGatePinnedImages = newFeatureGate("PinnedImages"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("jhernand"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). + reportProblemsToJiraComponent("Cluster Version Operator"). + contactPerson("pmuller"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("akashem"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4006"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVolumeAttributesClass = newFeatureGate("VolumeAttributesClass"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("dfajmon"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3751"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("fbertina"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3476"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC"). + reportProblemsToJiraComponent("authentication"). + contactPerson("liouk"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1596"). 
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableForClusterProfile(Hypershift, configv1.Default, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExample = newFeatureGate("Example"). + reportProblemsToJiraComponent("cluster-config"). + contactPerson("deads"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGatePlatformOperators = newFeatureGate("PlatformOperators"). + reportProblemsToJiraComponent("olm"). + contactPerson("joe"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNewOLM = newFeatureGate("NewOLM"). + reportProblemsToJiraComponent("olm"). + contactPerson("joe"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + mustRegister() + + FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). + reportProblemsToJiraComponent("insights"). + contactPerson("tremes"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer"). + reportProblemsToJiraComponent("metal"). + contactPerson("EmilienM"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInsightsConfig = newFeatureGate("InsightsConfig"). + reportProblemsToJiraComponent("insights"). + contactPerson("tremes"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("jerzhang"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles"). + reportProblemsToJiraComponent("Monitoring"). + contactPerson("rexagod"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("rbednar"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateClusterAPIInstallIBMCloud = newFeatureGate("ClusterAPIInstallIBMCloud"). + reportProblemsToJiraComponent("Installer"). + contactPerson("cjschaef"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + mustRegister() + + FeatureGateChunkSizeMiB = newFeatureGate("ChunkSizeMiB"). 
+ reportProblemsToJiraComponent("Image Registry"). + contactPerson("flavianmissi"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMachineAPIMigration = newFeatureGate("MachineAPIMigration"). + reportProblemsToJiraComponent("OCPCLOUD"). + contactPerson("jspeed"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + mustRegister() + + FeatureGatePersistentIPsForVirtualization = newFeatureGate("PersistentIPsForVirtualization"). + reportProblemsToJiraComponent("CNV Network"). + contactPerson("mduarted"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateClusterMonitoringConfig = newFeatureGate("ClusterMonitoringConfig"). + reportProblemsToJiraComponent("Monitoring"). + contactPerson("marioferh"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMultiArchInstallAWS = newFeatureGate("MultiArchInstallAWS"). + reportProblemsToJiraComponent("Installer"). + contactPerson("r4f4"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMultiArchInstallAzure = newFeatureGate("MultiArchInstallAzure"). + reportProblemsToJiraComponent("Installer"). + contactPerson("r4f4"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + mustRegister() + + FeatureGateMultiArchInstallGCP = newFeatureGate("MultiArchInstallGCP"). + reportProblemsToJiraComponent("Installer"). + contactPerson("r4f4"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateIngressControllerLBSubnetsAWS = newFeatureGate("IngressControllerLBSubnetsAWS"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAWSEFSDriverVolumeMetrics = newFeatureGate("AWSEFSDriverVolumeMetrics"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("fbertina"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateImageStreamImportMode = newFeatureGate("ImageStreamImportMode"). + reportProblemsToJiraComponent("Multi-Arch"). + contactPerson("psundara"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateUserNamespacesSupport = newFeatureGate("UserNamespacesSupport"). + reportProblemsToJiraComponent("Node"). + contactPerson("haircommander"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() + + FeatureGateUserNamespacesPodSecurityStandards = newFeatureGate("UserNamespacesPodSecurityStandards"). + reportProblemsToJiraComponent("Node"). + contactPerson("haircommander"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateProcMountType = newFeatureGate("ProcMountType"). + reportProblemsToJiraComponent("Node"). + contactPerson("haircommander"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4265"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks"). + reportProblemsToJiraComponent("SPLAT"). + contactPerson("rvanderp"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateIngressControllerDynamicConfigurationManager = newFeatureGate("IngressControllerDynamicConfigurationManager"). + reportProblemsToJiraComponent("Networking/router"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMinimumKubeletVersion = newFeatureGate("MinimumKubeletVersion"). + reportProblemsToJiraComponent("Node"). + contactPerson("haircommander"). + productScope(ocpSpecific). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1697"). + mustRegister() + + FeatureGateNutanixMultiSubnets = newFeatureGate("NutanixMultiSubnets"). + reportProblemsToJiraComponent("Cloud Compute / Nutanix Provider"). + contactPerson("yanhli"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1711"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateKMSEncryptionProvider = newFeatureGate("KMSEncryptionProvider"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("swghosh"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1682"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateHighlyAvailableArbiter = newFeatureGate("HighlyAvailableArbiter"). + reportProblemsToJiraComponent("TwoNode / Arbiter"). + contactPerson("eggfoobar"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1674"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() +) diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go new file mode 100644 index 0000000000000..06e1600e0d912 --- /dev/null +++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go @@ -0,0 +1,129 @@ +package features + +import "k8s.io/apimachinery/pkg/util/sets" + +var legacyFeatureGates = sets.New( + "AWSClusterHostedDNS", + // never add to this list, if you think you have an exception ask @deads2k + "AWSEFSDriverVolumeMetrics", + // never add to this list, if you think you have an exception ask @deads2k + "AdditionalRoutingCapabilities", + // never add to this list, if you think you have an exception ask @deads2k + "AdminNetworkPolicy", + // never add to this list, if you think you have an exception ask @deads2k + "AlibabaPlatform", + // never add to this list, if you think you have an exception ask @deads2k + "AutomatedEtcdBackup", + // never add to this list, if you think you have an exception ask @deads2k + "AzureWorkloadIdentity", + // never add to this list, if you think you have an exception ask @deads2k + "BareMetalLoadBalancer", + // never add to this list, if you think you have an exception ask @deads2k + "BootcNodeManagement", + // never add to this list, if you think you have an exception ask @deads2k + "BuildCSIVolumes", + // never add to this list, if you think you have an exception ask @deads2k + "ChunkSizeMiB", + // never add to this list, if you think you have an exception ask @deads2k + "ClusterAPIInstall", + // never add to this list, if you think you have an exception ask @deads2k + "ClusterAPIInstallIBMCloud", + // never add to this list, if you think you have an exception ask @deads2k + "ClusterMonitoringConfig", + // never add to this list, if you think you have an exception ask @deads2k + "DNSNameResolver", + // never add to this list, if you think you have an exception ask @deads2k + "EtcdBackendQuota", + // never add to this list, if you think you have an exception ask @deads2k + "Example", + // never add to this list, if you think you have an exception ask @deads2k + "GCPClusterHostedDNS", + // never add to this list, if you think you have an exception ask @deads2k + "GCPLabelsTags", + // never add to this list, if you think you have an exception ask @deads2k + "GatewayAPI", + // never add to this list, if you think you have an exception ask @deads2k + "HardwareSpeed", + // never add to this list, if you think you have an exception ask @deads2k + "ImageStreamImportMode", + // never add to this list, if you think you have an exception ask @deads2k + "IngressControllerDynamicConfigurationManager", + // never add to this list, if you think you have an exception ask @deads2k + "IngressControllerLBSubnetsAWS", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsConfig", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsConfigAPI", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsOnDemandDataGather", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsRuntimeExtractor", + // never add to this list, if you think you have an exception ask @deads2k + "KMSv1", + // never add to this list, if you think you have an exception ask @deads2k + "MachineAPIMigration", + // never add to this list, if you think you have an exception ask @deads2k + "MachineAPIOperatorDisableMachineHealthCheckController", + // never add 
to this list, if you think you have an exception ask @deads2k + "MachineAPIProviderOpenStack", + // never add to this list, if you think you have an exception ask @deads2k + "MachineConfigNodes", + // never add to this list, if you think you have an exception ask @deads2k + "ManagedBootImages", + // never add to this list, if you think you have an exception ask @deads2k + "ManagedBootImagesAWS", + // never add to this list, if you think you have an exception ask @deads2k + "MetricsCollectionProfiles", + // never add to this list, if you think you have an exception ask @deads2k + "MixedCPUsAllocation", + // never add to this list, if you think you have an exception ask @deads2k + "MultiArchInstallAWS", + // never add to this list, if you think you have an exception ask @deads2k + "MultiArchInstallAzure", + // never add to this list, if you think you have an exception ask @deads2k + "MultiArchInstallGCP", + // never add to this list, if you think you have an exception ask @deads2k + "NetworkDiagnosticsConfig", + // never add to this list, if you think you have an exception ask @deads2k + "NetworkLiveMigration", + // never add to this list, if you think you have an exception ask @deads2k + "NetworkSegmentation", + // never add to this list, if you think you have an exception ask @deads2k + "NewOLM", + // never add to this list, if you think you have an exception ask @deads2k + "NodeDisruptionPolicy", + // never add to this list, if you think you have an exception ask @deads2k + "OVNObservability", + // never add to this list, if you think you have an exception ask @deads2k + "OnClusterBuild", + // never add to this list, if you think you have an exception ask @deads2k + "PersistentIPsForVirtualization", + // never add to this list, if you think you have an exception ask @deads2k + "PinnedImages", + // never add to this list, if you think you have an exception ask @deads2k + "PlatformOperators", + // never add to this list, if you think you have an exception ask @deads2k + "PrivateHostedZoneAWS", + // never add to this list, if you think you have an exception ask @deads2k + "RouteAdvertisements", + // never add to this list, if you think you have an exception ask @deads2k + "RouteExternalCertificate", + // never add to this list, if you think you have an exception ask @deads2k + "SetEIPForNLBIngressController", + // never add to this list, if you think you have an exception ask @deads2k + "SignatureStores", + // never add to this list, if you think you have an exception ask @deads2k + "SigstoreImageVerification", + // never add to this list, if you think you have an exception ask @deads2k + "UpgradeStatus", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereControlPlaneMachineSet", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereDriverConfiguration", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereMultiNetworks", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereMultiVCenters", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereStaticIPs", +) diff --git a/vendor/github.com/openshift/api/features/util.go b/vendor/github.com/openshift/api/features/util.go new file mode 100644 index 0000000000000..59bb7bff4078a --- /dev/null +++ b/vendor/github.com/openshift/api/features/util.go @@ -0,0 +1,224 @@ +package features + +import ( + "fmt" + configv1 "github.com/openshift/api/config/v1" + "net/url" + "strings" +) + +// 
FeatureGateDescription is a golang-only type used to contain details for a feature gate. +type FeatureGateDescription struct { + // FeatureGateAttributes is the information that appears in the API + FeatureGateAttributes configv1.FeatureGateAttributes + + // OwningJiraComponent is the jira component that owns most of the impl and first assignment for the bug. + // This is the team that owns the feature long term. + OwningJiraComponent string + // ResponsiblePerson is the person who is on the hook for first contact. This is often, but not always, a team lead. + // It is someone who can make the promise on behalf of the team. + ResponsiblePerson string + // OwningProduct is the product that owns the lifecycle of the gate. + OwningProduct OwningProduct + // EnhancementPR is the PR for the enhancement. + EnhancementPR string +} + +type FeatureGateEnabledDisabled struct { + Enabled []FeatureGateDescription + Disabled []FeatureGateDescription +} + +type ClusterProfileName string + +var ( + Hypershift = ClusterProfileName("include.release.openshift.io/ibm-cloud-managed") + SelfManaged = ClusterProfileName("include.release.openshift.io/self-managed-high-availability") + AllClusterProfiles = []ClusterProfileName{Hypershift, SelfManaged} +) + +type OwningProduct string + +var ( + ocpSpecific = OwningProduct("OCP") + kubernetes = OwningProduct("Kubernetes") +) + +type featureGateBuilder struct { + name string + owningJiraComponent string + responsiblePerson string + owningProduct OwningProduct + enhancementPRURL string + + statusByClusterProfileByFeatureSet map[ClusterProfileName]map[configv1.FeatureSet]bool +} + +const ( + legacyFeatureGateWithoutEnhancement = "FeatureGate predates 4.18" +) + +// newFeatureGate returns a builder for the named gate; feature gates are disabled in every FeatureSet and selectively enabled +func newFeatureGate(name string) *featureGateBuilder { + b := &featureGateBuilder{ + name: name, + statusByClusterProfileByFeatureSet: map[ClusterProfileName]map[configv1.FeatureSet]bool{}, + } + for _, clusterProfile := range AllClusterProfiles { + byFeatureSet := map[configv1.FeatureSet]bool{} + for _, featureSet := range configv1.AllFixedFeatureSets { + byFeatureSet[featureSet] = false + } + b.statusByClusterProfileByFeatureSet[clusterProfile] = byFeatureSet + } + return b +} + +func (b *featureGateBuilder) reportProblemsToJiraComponent(owningJiraComponent string) *featureGateBuilder { + b.owningJiraComponent = owningJiraComponent + return b +} + +func (b *featureGateBuilder) contactPerson(responsiblePerson string) *featureGateBuilder { + b.responsiblePerson = responsiblePerson + return b +} + +func (b *featureGateBuilder) productScope(owningProduct OwningProduct) *featureGateBuilder { + b.owningProduct = owningProduct + return b +} + +func (b *featureGateBuilder) enhancementPR(url string) *featureGateBuilder { + b.enhancementPRURL = url + return b +} + +func (b *featureGateBuilder) enableIn(featureSets ...configv1.FeatureSet) *featureGateBuilder { + for clusterProfile := range b.statusByClusterProfileByFeatureSet { + for _, featureSet := range featureSets { + b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true + } + } + return b +} + +func (b *featureGateBuilder) enableForClusterProfile(clusterProfile ClusterProfileName, featureSets ...configv1.FeatureSet) *featureGateBuilder { + for _, featureSet := range featureSets { + b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true + } + return b +} + +func (b *featureGateBuilder) register() (configv1.FeatureGateName, error) {
if len(b.name) == 0 { + return "", fmt.Errorf("missing name") + } + if len(b.owningJiraComponent) == 0 { + return "", fmt.Errorf("missing owningJiraComponent") + } + if len(b.responsiblePerson) == 0 { + return "", fmt.Errorf("missing responsiblePerson") + } + if len(b.owningProduct) == 0 { + return "", fmt.Errorf("missing owningProduct") + } + _, enhancementPRErr := url.Parse(b.enhancementPRURL) + switch { + case b.enhancementPRURL == legacyFeatureGateWithoutEnhancement: + if !legacyFeatureGates.Has(b.name) { + return "", fmt.Errorf("FeatureGate/%s is a new feature gate, not an existing one. It must have an enhancementPR with GA Graduation Criteria like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + } + + case len(b.enhancementPRURL) == 0: + return "", fmt.Errorf("FeatureGate/%s is missing an enhancementPR with GA Graduation Criteria like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + + case !strings.HasPrefix(b.enhancementPRURL, "https://github.com/openshift/enhancements/pull/") && !strings.HasPrefix(b.enhancementPRURL, "https://github.com/kubernetes/enhancements/issues/"): + return "", fmt.Errorf("FeatureGate/%s enhancementPR format is incorrect; must be like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + + case enhancementPRErr != nil: + return "", fmt.Errorf("FeatureGate/%s enhancementPR is invalid: %w", b.name, enhancementPRErr) + } + + featureGateName := configv1.FeatureGateName(b.name) + description := FeatureGateDescription{ + FeatureGateAttributes: configv1.FeatureGateAttributes{ + Name: featureGateName, + }, + OwningJiraComponent: b.owningJiraComponent, + ResponsiblePerson: b.responsiblePerson, + OwningProduct: b.owningProduct, + EnhancementPR: b.enhancementPRURL, + } + + // statusByClusterProfileByFeatureSet is initialized by constructor to be false for every combination + for clusterProfile, byFeatureSet := range b.statusByClusterProfileByFeatureSet { + for featureSet, enabled := range byFeatureSet { + if _, ok := allFeatureGates[clusterProfile]; !ok { + allFeatureGates[clusterProfile] = map[configv1.FeatureSet]*FeatureGateEnabledDisabled{} + } + if _, ok := allFeatureGates[clusterProfile][featureSet]; !ok { + allFeatureGates[clusterProfile][featureSet] = &FeatureGateEnabledDisabled{} + } + + if enabled { + allFeatureGates[clusterProfile][featureSet].Enabled = append(allFeatureGates[clusterProfile][featureSet].Enabled, description) + } else { + allFeatureGates[clusterProfile][featureSet].Disabled = append(allFeatureGates[clusterProfile][featureSet].Disabled, description) + } + } + } + + return featureGateName, nil +} + +func (b *featureGateBuilder) mustRegister() configv1.FeatureGateName { + ret, err := b.register() + if err != nil { + panic(err) + } + return ret +}
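With register and mustRegister in place, every gate declared in features.go lands in allFeatureGates, and consumers query it through the FeatureSets function defined near the top of that file. A small usage sketch (the printed set depends on which gates are registered):

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/api/features"
)

func main() {
	// Look up the gates for the self-managed profile in TechPreviewNoUpgrade.
	fg, err := features.FeatureSets(features.SelfManaged, configv1.TechPreviewNoUpgrade)
	if err != nil {
		panic(err)
	}
	for _, gate := range fg.Enabled {
		fmt.Printf("%s (owned by %s)\n", gate.FeatureGateAttributes.Name, gate.OwningJiraComponent)
	}
}
```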
+ +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]FeatureGateDescription, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]FeatureGateDescription, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled. +func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled { + if in == nil { + return nil + } + out := new(FeatureGateEnabledDisabled) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateDescription) DeepCopyInto(out *FeatureGateDescription) { + *out = *in + out.FeatureGateAttributes = in.FeatureGateAttributes + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDescription. +func (in *FeatureGateDescription) DeepCopy() *FeatureGateDescription { + if in == nil { + return nil + } + out := new(FeatureGateDescription) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/docker10/doc.go b/vendor/github.com/openshift/api/image/docker10/doc.go new file mode 100644 index 0000000000000..cc194d24db2ca --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package docker10 is the docker10 version of the API. +package docker10 diff --git a/vendor/github.com/openshift/api/image/docker10/register.go b/vendor/github.com/openshift/api/image/docker10/register.go new file mode 100644 index 0000000000000..3d5ad268ae1ae --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/register.go @@ -0,0 +1,47 @@ +package docker10 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"} + +	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/image/docker10/types_docker.go b/vendor/github.com/openshift/api/image/docker10/types_docker.go new file mode 100644 index 0000000000000..03f0f67fcc5c0 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/types_docker.go @@ -0,0 +1,60 @@ +package docker10 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is the type representing a container image and its various properties when +// retrieved from the Docker client API. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"Id"` + Parent string `json:"Parent,omitempty"` + Comment string `json:"Comment,omitempty"` + Created metav1.Time `json:"Created,omitempty"` + Container string `json:"Container,omitempty"` + ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty"` + Config *DockerConfig `json:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. 
+type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` +} diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..2ce8330b2c433 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package docker10 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. 
+func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..e818f784ab364 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go @@ -0,0 +1,30 @@ +package docker10 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerConfig = map[string]string{ + "": "DockerConfig is the list of configuration options used when creating a container.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_DockerImage = map[string]string{ + "": "DockerImage is the type representing a container image and its various properties when retrieved from the Docker client API.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", +} + +func (DockerImage) SwaggerDoc() map[string]string { + return map_DockerImage +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go new file mode 100644 index 0000000000000..ddeb4403c4d9f --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go @@ -0,0 +1,18 @@ +package dockerpre012 + +// DeepCopyInto is manually built to copy the (probably bugged) time.Time +func (in *ImagePre012) DeepCopyInto(out *ImagePre012) { + *out = *in + out.Created = in.Created + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(Config) + (*in).DeepCopyInto(*out) + } + } + return +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/doc.go b/vendor/github.com/openshift/api/image/dockerpre012/doc.go new file mode 100644 index 0000000000000..e4a56260f10b9 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package dockerpre012 is the dockerpre012 version of the API. +package dockerpre012 diff --git a/vendor/github.com/openshift/api/image/dockerpre012/register.go b/vendor/github.com/openshift/api/image/dockerpre012/register.go new file mode 100644 index 0000000000000..7ce2adb0ad1a0 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/register.go @@ -0,0 +1,46 @@ +package dockerpre012 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +var ( + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go b/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go new file mode 100644 index 0000000000000..1111892a97165 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go @@ -0,0 +1,140 @@ +package dockerpre012 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the +// version of metadata that the container image registry uses to persist metadata. 
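+//
+// An illustrative, non-normative example of the pre-012 JSON this type decodes
+// (field names taken from the json tags on the struct below; values are made up):
+//
+//	{"id": "abc123", "parent": "def456", "created": "2015-01-01T00:00:00Z",
+//	 "container_config": {"Cmd": ["/bin/sh"]}, "architecture": "amd64"}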
+// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created metav1.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. +type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + // This field is not supported in pre012 and will always be empty. + Labels map[string]string `json:"Labels,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// Config is the list of configuration options used when creating a container. +// Config does not contain the options that are specific to starting a container on a +// given host. 
Those are contained in HostConfig +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"` + PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` + StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// Mount represents a mount point in the container. +// +// It has been added in the version 1.20 of the Docker API, available since +// Docker 1.8. +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Mount struct { + Name string + Source string + Destination string + Driver string + Mode string + RW bool +} + +// Port represents the port number and the protocol, in the form +// <number>/<protocol>. For example: 80/tcp.
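+//
+// An illustrative, non-normative sketch (assuming the strings package is
+// imported): splitting a Port on "/" recovers the number and the protocol:
+//
+//	parts := strings.SplitN(string(Port("80/tcp")), "/", 2) // ["80", "tcp"]
+//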
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Port string diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..0e8ecb20d5a34 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go @@ -0,0 +1,217 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package dockerpre012 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[Port]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
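+// All reference fields here (PortSpecs, ExposedPorts, Env, Cmd, DNS, Volumes,
+// Entrypoint, SecurityOpts, OnBuild, Labels) are re-allocated, so the copy
+// shares no mutable state with the receiver.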
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012. +func (in *ImagePre012) DeepCopy() *ImagePre012 { + if in == nil { + return nil + } + out := new(ImagePre012) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. 
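+// An illustrative, non-normative usage sketch:
+//
+//	m2 := m.DeepCopy()
+//	m2.Mode = "ro" // the receiver m is unchanged; Mount has only value fields
+//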
+func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..04900e809c921 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go @@ -0,0 +1,55 @@ +package dockerpre012 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Config = map[string]string{ + "": "Config is the list of configuration options used when creating a container. Config does not contain the options that are specific to starting a container on a given host. Those are contained in HostConfig Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_DockerConfig = map[string]string{ + "": "DockerConfig is the list of configuration options used when creating a container.", + "Labels": "This field is not supported in pre012 and will always be empty.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_DockerImage = map[string]string{ + "": "DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the version of metadata that the container image registry uses to persist metadata.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (DockerImage) SwaggerDoc() map[string]string { + return map_DockerImage +} + +var map_ImagePre012 = map[string]string{ + "": "ImagePre012 serves the same purpose as the Image type except that it is for earlier versions of the Docker API (pre-012 to be specific) Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (ImagePre012) SwaggerDoc() map[string]string { + return map_ImagePre012 +} + +var map_Mount = map[string]string{ + "": "Mount represents a mount point in the container.\n\nIt has been added in the version 1.20 of the Docker API, available since Docker 1.8. Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (Mount) SwaggerDoc() map[string]string { + return map_Mount +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/image/v1/consts.go b/vendor/github.com/openshift/api/image/v1/consts.go new file mode 100644 index 0000000000000..11f57a44a3693 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/consts.go @@ -0,0 +1,69 @@ +package v1 + +import corev1 "k8s.io/api/core/v1" + +const ( + // ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry. 
+ ManagedByOpenShiftAnnotation = "openshift.io/image.managed" + + // DockerImageRepositoryCheckAnnotation indicates that OpenShift has + // attempted to import tag and image information from an external Docker + // image repository. + DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck" + + // InsecureRepositoryAnnotation may be set to true on an image stream to allow insecure access to pull content. + InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository" + + // ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets. + ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret" + + // DockerImageLayersOrderAnnotation describes the order of layers in the docker image. + DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder" + + // DockerImageLayersOrderAscending indicates that image layers are sorted in + // the order of their addition (from oldest to latest). + DockerImageLayersOrderAscending = "ascending" + + // DockerImageLayersOrderDescending indicates that layers are sorted in + // reversed order of their addition (from newest to oldest). + DockerImageLayersOrderDescending = "descending" + + // ImporterPreferArchAnnotation represents an architecture that should be + // selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch" + + // ImporterPreferOSAnnotation represents an operating system that should + // be selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os" + + // ImageManifestBlobStoredAnnotation indicates that the manifest and config blobs of an image are stored in + // the storage of the integrated Docker registry. + ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored" + + // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. + DefaultImageTag = "latest" + + // ResourceImageStreams represents the number of image streams in a project. + ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams" + + // ResourceImageStreamImages represents the number of unique references to images in all image stream + // statuses of a project. + ResourceImageStreamImages corev1.ResourceName = "openshift.io/images" + + // ResourceImageStreamTags represents the number of unique references to images in all image stream specs + // of a project. + ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags" + + // Limit that applies to images. Used with a max["storage"] LimitRangeItem to set + // the maximum size of an image. + LimitTypeImage corev1.LimitType = "openshift.io/Image" + + // Limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum number + // of a resource, where the resource is one of "openshift.io/images" and "openshift.io/image-tags". + LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream" + + // The supported type of image signature.
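+ // An illustrative, non-normative check, assuming an ImageSignature value
+ // sig with a Type field as declared elsewhere in this API group:
+ //
+ //	sig.Type == ImageSignatureTypeAtomicImageV1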
+ ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1" +) diff --git a/vendor/github.com/openshift/api/image/v1/doc.go b/vendor/github.com/openshift/api/image/v1/doc.go new file mode 100644 index 0000000000000..e57d45bbf9440 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=image.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go new file mode 100644 index 0000000000000..ac776ad64d98c --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/generated.pb.go @@ -0,0 +1,11572 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/image/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *DockerImageReference) Reset() { *m = DockerImageReference{} } +func (*DockerImageReference) ProtoMessage() {} +func (*DockerImageReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{0} +} +func (m *DockerImageReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerImageReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerImageReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerImageReference.Merge(m, src) +} +func (m *DockerImageReference) XXX_Size() int { + return m.Size() +} +func (m *DockerImageReference) XXX_DiscardUnknown() { + xxx_messageInfo_DockerImageReference.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerImageReference proto.InternalMessageInfo + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{1} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(m, src) +} +func (m *Image) XXX_Size() int { + return m.Size() +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} } +func (*ImageBlobReferences) 
ProtoMessage() {} +func (*ImageBlobReferences) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{2} +} +func (m *ImageBlobReferences) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageBlobReferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageBlobReferences) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageBlobReferences.Merge(m, src) +} +func (m *ImageBlobReferences) XXX_Size() int { + return m.Size() +} +func (m *ImageBlobReferences) XXX_DiscardUnknown() { + xxx_messageInfo_ImageBlobReferences.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageBlobReferences proto.InternalMessageInfo + +func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} } +func (*ImageImportSpec) ProtoMessage() {} +func (*ImageImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{3} +} +func (m *ImageImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportSpec.Merge(m, src) +} +func (m *ImageImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportSpec proto.InternalMessageInfo + +func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} } +func (*ImageImportStatus) ProtoMessage() {} +func (*ImageImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{4} +} +func (m *ImageImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportStatus.Merge(m, src) +} +func (m *ImageImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportStatus proto.InternalMessageInfo + +func (m *ImageLayer) Reset() { *m = ImageLayer{} } +func (*ImageLayer) ProtoMessage() {} +func (*ImageLayer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{5} +} +func (m *ImageLayer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayer.Merge(m, src) +} +func (m *ImageLayer) XXX_Size() int { + return m.Size() +} +func (m *ImageLayer) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayer.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayer proto.InternalMessageInfo + +func (m *ImageLayerData) Reset() { *m = ImageLayerData{} } +func (*ImageLayerData) ProtoMessage() {} +func (*ImageLayerData) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{6} +} +func (m 
*ImageLayerData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayerData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayerData.Merge(m, src) +} +func (m *ImageLayerData) XXX_Size() int { + return m.Size() +} +func (m *ImageLayerData) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayerData.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayerData proto.InternalMessageInfo + +func (m *ImageList) Reset() { *m = ImageList{} } +func (*ImageList) ProtoMessage() {} +func (*ImageList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{7} +} +func (m *ImageList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageList.Merge(m, src) +} +func (m *ImageList) XXX_Size() int { + return m.Size() +} +func (m *ImageList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageList proto.InternalMessageInfo + +func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} } +func (*ImageLookupPolicy) ProtoMessage() {} +func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{8} +} +func (m *ImageLookupPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLookupPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLookupPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLookupPolicy.Merge(m, src) +} +func (m *ImageLookupPolicy) XXX_Size() int { + return m.Size() +} +func (m *ImageLookupPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLookupPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLookupPolicy proto.InternalMessageInfo + +func (m *ImageManifest) Reset() { *m = ImageManifest{} } +func (*ImageManifest) ProtoMessage() {} +func (*ImageManifest) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{9} +} +func (m *ImageManifest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageManifest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageManifest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageManifest.Merge(m, src) +} +func (m *ImageManifest) XXX_Size() int { + return m.Size() +} +func (m *ImageManifest) XXX_DiscardUnknown() { + xxx_messageInfo_ImageManifest.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageManifest proto.InternalMessageInfo + +func (m *ImageSignature) Reset() { *m = ImageSignature{} } +func (*ImageSignature) ProtoMessage() {} +func (*ImageSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{10} +} +func (m *ImageSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSignature.Merge(m, src) +} +func (m *ImageSignature) XXX_Size() int { + return m.Size() +} +func (m *ImageSignature) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSignature.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSignature proto.InternalMessageInfo + +func (m *ImageStream) Reset() { *m = ImageStream{} } +func (*ImageStream) ProtoMessage() {} +func (*ImageStream) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{11} +} +func (m *ImageStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStream.Merge(m, src) +} +func (m *ImageStream) XXX_Size() int { + return m.Size() +} +func (m *ImageStream) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStream.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStream proto.InternalMessageInfo + +func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} } +func (*ImageStreamImage) ProtoMessage() {} +func (*ImageStreamImage) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{12} +} +func (m *ImageStreamImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImage.Merge(m, src) +} +func (m *ImageStreamImage) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImage) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImage.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImage proto.InternalMessageInfo + +func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} } +func (*ImageStreamImport) ProtoMessage() {} +func (*ImageStreamImport) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{13} +} +func (m *ImageStreamImport) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImport) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImport.Merge(m, src) +} +func (m *ImageStreamImport) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImport) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImport.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImport proto.InternalMessageInfo + +func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} } +func (*ImageStreamImportSpec) ProtoMessage() {} +func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{14} +} +func (m *ImageStreamImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportSpec) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ImageStreamImportSpec.Merge(m, src) +} +func (m *ImageStreamImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportSpec proto.InternalMessageInfo + +func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} } +func (*ImageStreamImportStatus) ProtoMessage() {} +func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{15} +} +func (m *ImageStreamImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportStatus.Merge(m, src) +} +func (m *ImageStreamImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportStatus proto.InternalMessageInfo + +func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} } +func (*ImageStreamLayers) ProtoMessage() {} +func (*ImageStreamLayers) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{16} +} +func (m *ImageStreamLayers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamLayers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamLayers) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamLayers.Merge(m, src) +} +func (m *ImageStreamLayers) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamLayers) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamLayers.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamLayers proto.InternalMessageInfo + +func (m *ImageStreamList) Reset() { *m = ImageStreamList{} } +func (*ImageStreamList) ProtoMessage() {} +func (*ImageStreamList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{17} +} +func (m *ImageStreamList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamList.Merge(m, src) +} +func (m *ImageStreamList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamList proto.InternalMessageInfo + +func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} } +func (*ImageStreamMapping) ProtoMessage() {} +func (*ImageStreamMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{18} +} +func (m *ImageStreamMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamMapping) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamMapping.Merge(m, src) +} +func (m *ImageStreamMapping) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamMapping) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamMapping proto.InternalMessageInfo + +func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} } +func (*ImageStreamSpec) ProtoMessage() {} +func (*ImageStreamSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{19} +} +func (m *ImageStreamSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamSpec.Merge(m, src) +} +func (m *ImageStreamSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamSpec proto.InternalMessageInfo + +func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} } +func (*ImageStreamStatus) ProtoMessage() {} +func (*ImageStreamStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{20} +} +func (m *ImageStreamStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamStatus.Merge(m, src) +} +func (m *ImageStreamStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamStatus proto.InternalMessageInfo + +func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} } +func (*ImageStreamTag) ProtoMessage() {} +func (*ImageStreamTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{21} +} +func (m *ImageStreamTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTag.Merge(m, src) +} +func (m *ImageStreamTag) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTag proto.InternalMessageInfo + +func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} } +func (*ImageStreamTagList) ProtoMessage() {} +func (*ImageStreamTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{22} +} +func (m *ImageStreamTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTagList.Merge(m, src) +} +func (m 
*ImageStreamTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTagList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTagList proto.InternalMessageInfo + +func (m *ImageTag) Reset() { *m = ImageTag{} } +func (*ImageTag) ProtoMessage() {} +func (*ImageTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{23} +} +func (m *ImageTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTag.Merge(m, src) +} +func (m *ImageTag) XXX_Size() int { + return m.Size() +} +func (m *ImageTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTag proto.InternalMessageInfo + +func (m *ImageTagList) Reset() { *m = ImageTagList{} } +func (*ImageTagList) ProtoMessage() {} +func (*ImageTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{24} +} +func (m *ImageTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTagList.Merge(m, src) +} +func (m *ImageTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageTagList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTagList proto.InternalMessageInfo + +func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} } +func (*NamedTagEventList) ProtoMessage() {} +func (*NamedTagEventList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{25} +} +func (m *NamedTagEventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedTagEventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedTagEventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedTagEventList.Merge(m, src) +} +func (m *NamedTagEventList) XXX_Size() int { + return m.Size() +} +func (m *NamedTagEventList) XXX_DiscardUnknown() { + xxx_messageInfo_NamedTagEventList.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedTagEventList proto.InternalMessageInfo + +func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} } +func (*RepositoryImportSpec) ProtoMessage() {} +func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{26} +} +func (m *RepositoryImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportSpec.Merge(m, src) +} +func (m *RepositoryImportSpec) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportSpec.DiscardUnknown(m) +} + +var 
xxx_messageInfo_RepositoryImportSpec proto.InternalMessageInfo + +func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} } +func (*RepositoryImportStatus) ProtoMessage() {} +func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{27} +} +func (m *RepositoryImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportStatus.Merge(m, src) +} +func (m *RepositoryImportStatus) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportStatus proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{28} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SignatureCondition) Reset() { *m = SignatureCondition{} } +func (*SignatureCondition) ProtoMessage() {} +func (*SignatureCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{29} +} +func (m *SignatureCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureCondition.Merge(m, src) +} +func (m *SignatureCondition) XXX_Size() int { + return m.Size() +} +func (m *SignatureCondition) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureCondition proto.InternalMessageInfo + +func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} } +func (*SignatureGenericEntity) ProtoMessage() {} +func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{30} +} +func (m *SignatureGenericEntity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureGenericEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureGenericEntity) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureGenericEntity.Merge(m, src) +} +func (m *SignatureGenericEntity) XXX_Size() int { + return m.Size() +} +func (m *SignatureGenericEntity) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureGenericEntity.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SignatureGenericEntity proto.InternalMessageInfo + +func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} } +func (*SignatureIssuer) ProtoMessage() {} +func (*SignatureIssuer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{31} +} +func (m *SignatureIssuer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureIssuer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureIssuer) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureIssuer.Merge(m, src) +} +func (m *SignatureIssuer) XXX_Size() int { + return m.Size() +} +func (m *SignatureIssuer) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureIssuer.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureIssuer proto.InternalMessageInfo + +func (m *SignatureSubject) Reset() { *m = SignatureSubject{} } +func (*SignatureSubject) ProtoMessage() {} +func (*SignatureSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{32} +} +func (m *SignatureSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureSubject.Merge(m, src) +} +func (m *SignatureSubject) XXX_Size() int { + return m.Size() +} +func (m *SignatureSubject) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureSubject proto.InternalMessageInfo + +func (m *TagEvent) Reset() { *m = TagEvent{} } +func (*TagEvent) ProtoMessage() {} +func (*TagEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{33} +} +func (m *TagEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEvent.Merge(m, src) +} +func (m *TagEvent) XXX_Size() int { + return m.Size() +} +func (m *TagEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TagEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEvent proto.InternalMessageInfo + +func (m *TagEventCondition) Reset() { *m = TagEventCondition{} } +func (*TagEventCondition) ProtoMessage() {} +func (*TagEventCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{34} +} +func (m *TagEventCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEventCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEventCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEventCondition.Merge(m, src) +} +func (m *TagEventCondition) XXX_Size() int { + return m.Size() +} +func (m *TagEventCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TagEventCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEventCondition proto.InternalMessageInfo + +func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} } +func (*TagImportPolicy) ProtoMessage() {} +func 
(*TagImportPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{35} +} +func (m *TagImportPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImportPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImportPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImportPolicy.Merge(m, src) +} +func (m *TagImportPolicy) XXX_Size() int { + return m.Size() +} +func (m *TagImportPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagImportPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImportPolicy proto.InternalMessageInfo + +func (m *TagReference) Reset() { *m = TagReference{} } +func (*TagReference) ProtoMessage() {} +func (*TagReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{36} +} +func (m *TagReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReference.Merge(m, src) +} +func (m *TagReference) XXX_Size() int { + return m.Size() +} +func (m *TagReference) XXX_DiscardUnknown() { + xxx_messageInfo_TagReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReference proto.InternalMessageInfo + +func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} } +func (*TagReferencePolicy) ProtoMessage() {} +func (*TagReferencePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{37} +} +func (m *TagReferencePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReferencePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReferencePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReferencePolicy.Merge(m, src) +} +func (m *TagReferencePolicy) XXX_Size() int { + return m.Size() +} +func (m *TagReferencePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagReferencePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReferencePolicy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*DockerImageReference)(nil), "github.com.openshift.api.image.v1.DockerImageReference") + proto.RegisterType((*Image)(nil), "github.com.openshift.api.image.v1.Image") + proto.RegisterType((*ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageBlobReferences") + proto.RegisterType((*ImageImportSpec)(nil), "github.com.openshift.api.image.v1.ImageImportSpec") + proto.RegisterType((*ImageImportStatus)(nil), "github.com.openshift.api.image.v1.ImageImportStatus") + proto.RegisterType((*ImageLayer)(nil), "github.com.openshift.api.image.v1.ImageLayer") + proto.RegisterType((*ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageLayerData") + proto.RegisterType((*ImageList)(nil), "github.com.openshift.api.image.v1.ImageList") + proto.RegisterType((*ImageLookupPolicy)(nil), "github.com.openshift.api.image.v1.ImageLookupPolicy") + proto.RegisterType((*ImageManifest)(nil), "github.com.openshift.api.image.v1.ImageManifest") + proto.RegisterType((*ImageSignature)(nil), "github.com.openshift.api.image.v1.ImageSignature") + 
proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.ImageSignature.SignedClaimsEntry") + proto.RegisterType((*ImageStream)(nil), "github.com.openshift.api.image.v1.ImageStream") + proto.RegisterType((*ImageStreamImage)(nil), "github.com.openshift.api.image.v1.ImageStreamImage") + proto.RegisterType((*ImageStreamImport)(nil), "github.com.openshift.api.image.v1.ImageStreamImport") + proto.RegisterType((*ImageStreamImportSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamImportSpec") + proto.RegisterType((*ImageStreamImportStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamImportStatus") + proto.RegisterType((*ImageStreamLayers)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers") + proto.RegisterMapType((map[string]ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.BlobsEntry") + proto.RegisterMapType((map[string]ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.ImagesEntry") + proto.RegisterType((*ImageStreamList)(nil), "github.com.openshift.api.image.v1.ImageStreamList") + proto.RegisterType((*ImageStreamMapping)(nil), "github.com.openshift.api.image.v1.ImageStreamMapping") + proto.RegisterType((*ImageStreamSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamSpec") + proto.RegisterType((*ImageStreamStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamStatus") + proto.RegisterType((*ImageStreamTag)(nil), "github.com.openshift.api.image.v1.ImageStreamTag") + proto.RegisterType((*ImageStreamTagList)(nil), "github.com.openshift.api.image.v1.ImageStreamTagList") + proto.RegisterType((*ImageTag)(nil), "github.com.openshift.api.image.v1.ImageTag") + proto.RegisterType((*ImageTagList)(nil), "github.com.openshift.api.image.v1.ImageTagList") + proto.RegisterType((*NamedTagEventList)(nil), "github.com.openshift.api.image.v1.NamedTagEventList") + proto.RegisterType((*RepositoryImportSpec)(nil), "github.com.openshift.api.image.v1.RepositoryImportSpec") + proto.RegisterType((*RepositoryImportStatus)(nil), "github.com.openshift.api.image.v1.RepositoryImportStatus") + proto.RegisterType((*SecretList)(nil), "github.com.openshift.api.image.v1.SecretList") + proto.RegisterType((*SignatureCondition)(nil), "github.com.openshift.api.image.v1.SignatureCondition") + proto.RegisterType((*SignatureGenericEntity)(nil), "github.com.openshift.api.image.v1.SignatureGenericEntity") + proto.RegisterType((*SignatureIssuer)(nil), "github.com.openshift.api.image.v1.SignatureIssuer") + proto.RegisterType((*SignatureSubject)(nil), "github.com.openshift.api.image.v1.SignatureSubject") + proto.RegisterType((*TagEvent)(nil), "github.com.openshift.api.image.v1.TagEvent") + proto.RegisterType((*TagEventCondition)(nil), "github.com.openshift.api.image.v1.TagEventCondition") + proto.RegisterType((*TagImportPolicy)(nil), "github.com.openshift.api.image.v1.TagImportPolicy") + proto.RegisterType((*TagReference)(nil), "github.com.openshift.api.image.v1.TagReference") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.TagReference.AnnotationsEntry") + proto.RegisterType((*TagReferencePolicy)(nil), "github.com.openshift.api.image.v1.TagReferencePolicy") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/image/v1/generated.proto", fileDescriptor_650a0b34f65fde60) +} + +var fileDescriptor_650a0b34f65fde60 = []byte{ + // 2691 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0x15, 
0xf6, 0xf2, 0x4f, 0xd4, 0x13, 0x25, 0x59, 0x63, 0xcb, 0x61, 0x68, 0x47, 0x92, 0xd7, 0xb5, + 0xe1, 0x34, 0x0e, 0x19, 0xa9, 0x4e, 0x2a, 0xbb, 0x40, 0x1d, 0xd3, 0x74, 0x0d, 0xb6, 0x62, 0xac, + 0x8c, 0x58, 0xa3, 0x35, 0x5c, 0xa0, 0xab, 0xe5, 0x68, 0xb5, 0x15, 0xb9, 0xcb, 0xee, 0x2e, 0x95, + 0xc8, 0x68, 0x81, 0xa2, 0x28, 0x82, 0x1c, 0x7a, 0x68, 0xcf, 0x39, 0x16, 0x41, 0x51, 0x14, 0xe8, + 0xa5, 0x68, 0xd0, 0x53, 0x2f, 0x4d, 0x01, 0xa3, 0xa7, 0x20, 0xe8, 0x21, 0x97, 0x0a, 0xb1, 0xda, + 0x73, 0x6f, 0xbd, 0xf8, 0x54, 0xcc, 0xcf, 0xfe, 0x72, 0x29, 0xed, 0xaa, 0x16, 0xdb, 0xdc, 0xc8, + 0x79, 0xef, 0x7d, 0x6f, 0xe6, 0xbd, 0x37, 0xef, 0xbd, 0x99, 0x59, 0x58, 0xd6, 0x74, 0x67, 0x7b, + 0xb0, 0x59, 0x55, 0xcd, 0x5e, 0xcd, 0xec, 0x13, 0xc3, 0xde, 0xd6, 0xb7, 0x9c, 0x9a, 0xd2, 0xd7, + 0x6b, 0x7a, 0x4f, 0xd1, 0x48, 0x6d, 0x77, 0xb9, 0xa6, 0x11, 0x83, 0x58, 0x8a, 0x43, 0x3a, 0xd5, + 0xbe, 0x65, 0x3a, 0x26, 0xba, 0xe8, 0x8b, 0x54, 0x3d, 0x91, 0xaa, 0xd2, 0xd7, 0xab, 0x4c, 0xa4, + 0xba, 0xbb, 0x5c, 0x79, 0x35, 0x80, 0xaa, 0x99, 0x9a, 0x59, 0x63, 0x92, 0x9b, 0x83, 0x2d, 0xf6, + 0x8f, 0xfd, 0x61, 0xbf, 0x38, 0x62, 0x45, 0xde, 0x59, 0xb5, 0xab, 0xba, 0xc9, 0xd4, 0xaa, 0xa6, + 0x15, 0xa7, 0xb5, 0x72, 0xdd, 0xe7, 0xe9, 0x29, 0xea, 0xb6, 0x6e, 0x10, 0x6b, 0xaf, 0xd6, 0xdf, + 0xd1, 0xe8, 0x80, 0x5d, 0xeb, 0x11, 0x47, 0x89, 0x93, 0xaa, 0x8d, 0x92, 0xb2, 0x06, 0x86, 0xa3, + 0xf7, 0xc8, 0x90, 0xc0, 0x1b, 0x47, 0x09, 0xd8, 0xea, 0x36, 0xe9, 0x29, 0x51, 0x39, 0xf9, 0x53, + 0x09, 0xce, 0x36, 0x4c, 0x75, 0x87, 0x58, 0x4d, 0x6a, 0x04, 0x4c, 0xb6, 0x88, 0x45, 0x0c, 0x95, + 0xa0, 0x6b, 0x50, 0xb4, 0x88, 0xa6, 0xdb, 0x8e, 0xb5, 0x57, 0x96, 0x96, 0xa4, 0xab, 0x93, 0xf5, + 0xd3, 0x4f, 0xf6, 0x17, 0x4f, 0x1d, 0xec, 0x2f, 0x16, 0xb1, 0x18, 0xc7, 0x1e, 0x07, 0xaa, 0xc1, + 0xa4, 0xa1, 0xf4, 0x88, 0xdd, 0x57, 0x54, 0x52, 0xce, 0x30, 0xf6, 0x39, 0xc1, 0x3e, 0xf9, 0x96, + 0x4b, 0xc0, 0x3e, 0x0f, 0x5a, 0x82, 0x1c, 0xfd, 0x53, 0xce, 0x32, 0xde, 0x92, 0xe0, 0xcd, 0x51, + 0x5e, 0xcc, 0x28, 0xe8, 0x25, 0xc8, 0x3a, 0x8a, 0x56, 0xce, 0x31, 0x86, 0x29, 0xc1, 0x90, 0x6d, + 0x2b, 0x1a, 0xa6, 0xe3, 0xa8, 0x02, 0x19, 0xbd, 0x51, 0xce, 0x33, 0x2a, 0x08, 0x6a, 0xa6, 0xd9, + 0xc0, 0x19, 0xbd, 0x21, 0xff, 0xad, 0x08, 0x79, 0xb6, 0x1c, 0xf4, 0x7d, 0x28, 0x52, 0x13, 0x77, + 0x14, 0x47, 0x61, 0xab, 0x98, 0x5a, 0x79, 0xad, 0xca, 0x2d, 0x55, 0x0d, 0x5a, 0xaa, 0xda, 0xdf, + 0xd1, 0xe8, 0x80, 0x5d, 0xa5, 0xdc, 0xd5, 0xdd, 0xe5, 0xea, 0xfd, 0xcd, 0x1f, 0x10, 0xd5, 0x69, + 0x11, 0x47, 0xa9, 0x23, 0x81, 0x0e, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x3a, 0x9c, 0xed, 0xc4, 0xd8, + 0x4f, 0x18, 0xe1, 0x82, 0x90, 0x8d, 0xb5, 0x31, 0x8e, 0x95, 0x44, 0x3f, 0x82, 0x33, 0x81, 0xf1, + 0x96, 0x3b, 0xfd, 0x2c, 0x9b, 0xfe, 0xab, 0x23, 0xa7, 0x2f, 0x1c, 0x5d, 0xc5, 0xca, 0x3b, 0x77, + 0xdf, 0x75, 0x88, 0x61, 0xeb, 0xa6, 0x51, 0x3f, 0x2f, 0xf4, 0x9f, 0x69, 0x0c, 0x23, 0xe2, 0x38, + 0x35, 0x68, 0x13, 0x2a, 0x31, 0xc3, 0x0f, 0x88, 0x45, 0xf1, 0x84, 0x37, 0x64, 0x81, 0x5a, 0x69, + 0x8c, 0xe4, 0xc4, 0x87, 0xa0, 0xa0, 0x56, 0x78, 0x85, 0x8a, 0xa1, 0x6f, 0x11, 0xdb, 0x11, 0xce, + 0x8c, 0x9d, 0xb2, 0x60, 0xc1, 0x71, 0x72, 0x68, 0x17, 0xe6, 0x02, 0xc3, 0x6b, 0xca, 0x1e, 0xb1, + 0xec, 0x72, 0x61, 0x29, 0xcb, 0xcc, 0x75, 0xe4, 0xa6, 0xaf, 0xfa, 0x52, 0xf5, 0x17, 0x85, 0xee, + 0xb9, 0x46, 0x14, 0x0f, 0x0f, 0xab, 0x40, 0x04, 0xc0, 0xd6, 0x35, 0x43, 0x71, 0x06, 0x16, 0xb1, + 0xcb, 0x13, 0x4c, 0xe1, 0x72, 0x52, 0x85, 0x1b, 0xae, 0xa4, 0x1f, 0x5f, 0xde, 0x90, 0x8d, 0x03, + 0xc0, 0xe8, 0x3e, 0xcc, 0x07, 0x74, 0xfb, 0x4c, 0xe5, 0xe2, 0x52, 0xf6, 0x6a, 0xa9, 0xfe, 0xe2, + 0xc1, 0xfe, 0xe2, 0x7c, 0x23, 
0x8e, 0x01, 0xc7, 0xcb, 0xa1, 0x6d, 0xb8, 0x10, 0x63, 0xc6, 0x16, + 0xe9, 0xe8, 0x4a, 0x7b, 0xaf, 0x4f, 0xca, 0x93, 0xcc, 0x0f, 0x5f, 0x12, 0xd3, 0xba, 0xd0, 0x38, + 0x84, 0x17, 0x1f, 0x8a, 0x84, 0xee, 0x85, 0x3c, 0x73, 0xc7, 0x34, 0xb6, 0x74, 0xad, 0x0c, 0x0c, + 0x3e, 0xce, 0xd4, 0x9c, 0x01, 0x0f, 0xcb, 0xa0, 0x9f, 0x4a, 0xa1, 0x6d, 0xe6, 0x6a, 0xb2, 0xcb, + 0x53, 0xcc, 0xea, 0xaf, 0x25, 0xb5, 0xba, 0x2b, 0x18, 0xbb, 0x31, 0x3d, 0x54, 0x1c, 0xab, 0x4b, + 0xfe, 0x58, 0x82, 0x33, 0x6c, 0xa8, 0xde, 0x35, 0x37, 0xbd, 0xfd, 0x6a, 0xa3, 0x55, 0x28, 0x31, + 0x2d, 0x2d, 0xdd, 0xb6, 0x75, 0x43, 0x63, 0x3b, 0xb5, 0x58, 0x3f, 0x2b, 0x34, 0x94, 0x9a, 0x01, + 0x1a, 0x0e, 0x71, 0x22, 0x19, 0x0a, 0x5d, 0x1e, 0xae, 0xd2, 0x52, 0x96, 0x26, 0xb2, 0x83, 0xfd, + 0xc5, 0x82, 0x08, 0x38, 0x41, 0xa1, 0x3c, 0x2a, 0x37, 0x1c, 0x4f, 0x29, 0x8c, 0x47, 0x58, 0x4a, + 0x50, 0xd0, 0x2b, 0x30, 0xd9, 0xf3, 0x4c, 0x92, 0x63, 0x50, 0xd3, 0x34, 0xf5, 0xfa, 0x2b, 0xf2, + 0xe9, 0xf2, 0x5f, 0xb2, 0x30, 0xcb, 0xe6, 0xd4, 0xec, 0xf5, 0x4d, 0xcb, 0xd9, 0xe8, 0x13, 0x15, + 0xdd, 0x85, 0xdc, 0x96, 0x65, 0xf6, 0x44, 0x8e, 0xbc, 0x14, 0x48, 0x32, 0x55, 0x5a, 0xd8, 0xfc, + 0x8c, 0xe8, 0x2d, 0xdb, 0xcf, 0xd9, 0xdf, 0xb0, 0xcc, 0x1e, 0x66, 0xe2, 0xe8, 0x4d, 0xc8, 0x38, + 0x26, 0x9b, 0xe7, 0xd4, 0xca, 0xd5, 0x38, 0x90, 0x35, 0x53, 0x55, 0xba, 0x51, 0xa4, 0x02, 0x4d, + 0xdd, 0x6d, 0x13, 0x67, 0x1c, 0x13, 0x75, 0xa9, 0x2d, 0xe9, 0xb4, 0xd6, 0xcd, 0xae, 0xae, 0xee, + 0x89, 0xac, 0xb7, 0x92, 0xc0, 0xbf, 0x6d, 0x45, 0x6b, 0x06, 0x24, 0x83, 0xf6, 0xf7, 0x47, 0x71, + 0x08, 0x1d, 0xbd, 0x0b, 0xb3, 0x96, 0x3b, 0x0d, 0xa1, 0x30, 0xcf, 0x14, 0xbe, 0x9e, 0x4c, 0x21, + 0x0e, 0x0b, 0xd7, 0x5f, 0x10, 0x3a, 0x67, 0x23, 0x04, 0x1c, 0x55, 0x83, 0x6e, 0xc3, 0xac, 0x6e, + 0xa8, 0xdd, 0x41, 0xc7, 0x4f, 0x7f, 0x39, 0x16, 0x36, 0x1e, 0x44, 0x33, 0x4c, 0xc6, 0x51, 0x7e, + 0xf9, 0x77, 0x19, 0x98, 0x0b, 0xfa, 0xd1, 0x51, 0x9c, 0x81, 0x8d, 0xda, 0x50, 0xb0, 0xd9, 0x2f, + 0xe1, 0xcb, 0x6b, 0xc9, 0xea, 0x1d, 0x97, 0xae, 0xcf, 0x08, 0xed, 0x05, 0xfe, 0x1f, 0x0b, 0x2c, + 0xd4, 0x84, 0x3c, 0x5b, 0xb7, 0xe7, 0xdb, 0x84, 0xfb, 0xad, 0x3e, 0x79, 0xb0, 0xbf, 0xc8, 0x6b, + 0x31, 0xe6, 0x08, 0x6e, 0x5d, 0xcf, 0x8e, 0xa8, 0xeb, 0xdf, 0x8d, 0x86, 0x72, 0x1a, 0x6d, 0x5e, + 0xcf, 0x11, 0x1b, 0xf8, 0xef, 0x49, 0x00, 0x7e, 0xfe, 0xf6, 0x5a, 0x10, 0x69, 0x64, 0x0b, 0x72, + 0x19, 0x72, 0xb6, 0xfe, 0x98, 0x2f, 0x3a, 0xeb, 0x83, 0x33, 0xf1, 0x0d, 0xfd, 0x31, 0xc1, 0x8c, + 0x4c, 0x9b, 0x9f, 0x9e, 0x97, 0x3c, 0xb3, 0xe1, 0xe6, 0xc7, 0xcf, 0x94, 0x3e, 0x8f, 0xdc, 0x81, + 0x19, 0x7f, 0x1e, 0x0d, 0x5a, 0x75, 0x2f, 0x0a, 0x4d, 0x12, 0xd3, 0x34, 0x7d, 0xa4, 0x96, 0x4c, + 0x02, 0x2d, 0x7f, 0x94, 0x60, 0x92, 0xab, 0xd1, 0x6d, 0x07, 0x3d, 0x1a, 0xea, 0x84, 0xaa, 0xc9, + 0x22, 0x83, 0x4a, 0xb3, 0x3e, 0xc8, 0xeb, 0xff, 0xdc, 0x91, 0x40, 0x17, 0xd4, 0x82, 0xbc, 0xee, + 0x90, 0x9e, 0x5d, 0xce, 0xa4, 0xf4, 0xd8, 0xb4, 0x00, 0xcd, 0x37, 0xa9, 0x38, 0xe6, 0x28, 0xf2, + 0xaa, 0x88, 0xec, 0x35, 0xd3, 0xdc, 0x19, 0xf4, 0xc5, 0x96, 0xb9, 0x04, 0xf9, 0x2e, 0x4d, 0x1f, + 0x22, 0xbf, 0x7a, 0x92, 0x2c, 0xa7, 0x60, 0x4e, 0x93, 0x7f, 0x95, 0x81, 0xe9, 0x70, 0x77, 0x70, + 0x05, 0x0a, 0x1d, 0x5d, 0xa3, 0x1b, 0x8c, 0x3b, 0xda, 0x0b, 0xf1, 0x06, 0x1b, 0xc5, 0x82, 0x9a, + 0xda, 0xbe, 0x34, 0xed, 0xbb, 0xb1, 0x45, 0xdd, 0xc4, 0xa6, 0x95, 0xf5, 0xd3, 0x4e, 0x2b, 0x40, + 0xc3, 0x21, 0x4e, 0x2a, 0xa9, 0x58, 0xea, 0xb6, 0xee, 0x10, 0x95, 0x56, 0x64, 0xd1, 0x55, 0x79, + 0x92, 0xb7, 0x03, 0x34, 0x1c, 0xe2, 0xa4, 0x5d, 0xaf, 0x69, 0x47, 0xbb, 0xde, 0xfb, 0x1b, 0x38, + 0x63, 0xda, 0xe8, 0x65, 0x98, 0xd8, 0x55, 0x2c, 0x5d, 
0x31, 0x9c, 0x72, 0x81, 0x31, 0xcc, 0x0a, + 0x86, 0x89, 0x07, 0x7c, 0x18, 0xbb, 0x74, 0xf9, 0xf7, 0x05, 0x11, 0x81, 0x5e, 0x57, 0x30, 0x86, + 0x4e, 0x79, 0x09, 0x72, 0x8e, 0x6f, 0x5b, 0x6f, 0xbf, 0x31, 0xb3, 0x32, 0x0a, 0xba, 0x0c, 0x13, + 0xaa, 0x69, 0x38, 0xc4, 0x70, 0x98, 0x31, 0x4b, 0xf5, 0x29, 0x3a, 0xfb, 0x3b, 0x7c, 0x08, 0xbb, + 0x34, 0xa4, 0x03, 0xa8, 0xa6, 0xd1, 0xd1, 0x1d, 0xdd, 0x34, 0xdc, 0x1c, 0x91, 0x24, 0x61, 0x7b, + 0x8b, 0xbd, 0xe3, 0x4a, 0xfb, 0x33, 0xf6, 0x86, 0x6c, 0x1c, 0x00, 0x47, 0x5f, 0x83, 0x69, 0x26, + 0xde, 0xec, 0x10, 0xc3, 0xd1, 0x9d, 0x3d, 0x61, 0xfa, 0x79, 0x21, 0xc6, 0x43, 0xcd, 0x25, 0xe2, + 0x30, 0x2f, 0xfa, 0x31, 0x94, 0x68, 0x1b, 0x47, 0x3a, 0x77, 0xba, 0x8a, 0xde, 0x73, 0x5b, 0xd2, + 0x3b, 0xa9, 0x3b, 0x44, 0x36, 0x71, 0x17, 0xe5, 0xae, 0xe1, 0x58, 0x81, 0xe2, 0x16, 0x24, 0xe1, + 0x90, 0x3a, 0xf4, 0x36, 0x4c, 0xa8, 0x16, 0xa1, 0x67, 0xbd, 0xf2, 0x04, 0x73, 0xe8, 0x97, 0x93, + 0x39, 0xb4, 0xad, 0xf7, 0x88, 0xb0, 0x3c, 0x17, 0xc7, 0x2e, 0x0e, 0x4d, 0x22, 0xba, 0x6d, 0x0f, + 0x48, 0xa7, 0xbe, 0x57, 0x2e, 0x26, 0xae, 0xcc, 0xde, 0x42, 0x9a, 0x54, 0xd6, 0xaa, 0x97, 0x68, + 0x12, 0x69, 0x0a, 0x1c, 0xec, 0x21, 0xa2, 0xef, 0xb9, 0xe8, 0x6d, 0x93, 0xf5, 0xa0, 0x53, 0x2b, + 0x5f, 0x49, 0x83, 0xbe, 0x31, 0x60, 0x51, 0x17, 0x84, 0x6f, 0x9b, 0xd8, 0x83, 0xac, 0xdc, 0x82, + 0xb9, 0x21, 0x43, 0xa2, 0xd3, 0x90, 0xdd, 0x21, 0xe2, 0x84, 0x8b, 0xe9, 0x4f, 0x74, 0x16, 0xf2, + 0xbb, 0x4a, 0x77, 0x20, 0xe2, 0x14, 0xf3, 0x3f, 0x37, 0x33, 0xab, 0x12, 0xcd, 0x2d, 0x53, 0xdc, + 0x33, 0x8e, 0x45, 0x94, 0xde, 0x18, 0xb6, 0x4c, 0x1b, 0x72, 0x76, 0x9f, 0xa8, 0xa2, 0xea, 0xae, + 0x24, 0x8e, 0x1c, 0x36, 0x3f, 0xda, 0xd8, 0xf9, 0xdb, 0x8c, 0xfe, 0xc3, 0x0c, 0x0d, 0x3d, 0xf2, + 0x5a, 0x04, 0xde, 0x5d, 0x5d, 0x4f, 0x89, 0x7b, 0x68, 0xab, 0x20, 0xff, 0x59, 0x82, 0xd3, 0x01, + 0xee, 0x71, 0x9d, 0xc3, 0x5b, 0xc7, 0xed, 0x50, 0xfc, 0x0a, 0x14, 0xe8, 0x52, 0xe4, 0x3f, 0xb8, + 0xcd, 0x95, 0xbb, 0x0a, 0xda, 0x62, 0x8d, 0x61, 0x19, 0x0f, 0x43, 0x1e, 0x5f, 0x4d, 0xe7, 0x19, + 0xbf, 0xa1, 0x8f, 0xf5, 0xfb, 0x66, 0xc4, 0xef, 0x37, 0x8f, 0x85, 0x7e, 0xb8, 0xf7, 0x7f, 0x96, + 0x81, 0xf9, 0xd8, 0x19, 0xd1, 0x3a, 0xcc, 0x7b, 0x6f, 0x66, 0xb9, 0xa2, 0x8f, 0xc0, 0x79, 0xb0, + 0xa0, 0x22, 0x0d, 0xc0, 0x22, 0x7d, 0xd3, 0xd6, 0x1d, 0xd3, 0xda, 0x13, 0x76, 0xf8, 0x6a, 0x82, + 0x99, 0x62, 0x4f, 0x28, 0x60, 0x86, 0x19, 0x6a, 0x68, 0x9f, 0x82, 0x03, 0xd0, 0xe8, 0x21, 0x9d, + 0x90, 0xa2, 0x11, 0x6a, 0x8e, 0x6c, 0x9a, 0xed, 0x15, 0xc4, 0xf7, 0x17, 0x41, 0x91, 0xb0, 0x40, + 0x94, 0x3f, 0xca, 0xc0, 0x0b, 0x23, 0x4c, 0x87, 0x70, 0xc8, 0x10, 0xb4, 0x0f, 0x4b, 0xe5, 0x06, + 0x7e, 0x00, 0x8c, 0x18, 0x4d, 0x8f, 0x31, 0xda, 0x8d, 0xe3, 0x18, 0x4d, 0x78, 0xf7, 0x10, 0xb3, + 0x3d, 0x8a, 0x98, 0xed, 0x7a, 0x4a, 0xb3, 0x45, 0xe2, 0x27, 0x62, 0xb8, 0x0f, 0x73, 0xa1, 0x7d, + 0x27, 0x6e, 0x5a, 0x4e, 0x7e, 0xdf, 0x75, 0x20, 0xbf, 0xd9, 0x35, 0x37, 0xdd, 0x06, 0xf6, 0x56, + 0x3a, 0x9f, 0xf0, 0x69, 0x56, 0xeb, 0x14, 0x81, 0x17, 0x68, 0x2f, 0xab, 0xb0, 0x31, 0xcc, 0xc1, + 0xd1, 0x76, 0xc4, 0x76, 0x6f, 0x1e, 0x4b, 0x0d, 0x37, 0x19, 0xd7, 0x33, 0xc2, 0x8e, 0x95, 0x1d, + 0x00, 0x7f, 0x36, 0x31, 0x55, 0xee, 0x5e, 0xb0, 0xca, 0xa5, 0xb8, 0xb6, 0xf2, 0x8e, 0x2c, 0x81, + 0xc2, 0x58, 0xf9, 0xa1, 0xa8, 0x8b, 0x23, 0xb5, 0xad, 0x85, 0xb5, 0xbd, 0x91, 0x38, 0x39, 0x87, + 0x2e, 0x5a, 0x82, 0xb5, 0xf8, 0x63, 0x49, 0x5c, 0x62, 0x08, 0xcb, 0x9c, 0xfc, 0x11, 0x67, 0x23, + 0x7c, 0xc4, 0x49, 0xbb, 0x6b, 0xe3, 0x0f, 0x3a, 0xff, 0x94, 0x00, 0x05, 0xb8, 0x5a, 0x4a, 0xbf, + 0xaf, 0x1b, 0xda, 0x17, 0xae, 0x5c, 0x1e, 0x71, 0xa8, 0x97, 0x7f, 0x93, 0x09, 
0x79, 0x8b, 0xd5, + 0x03, 0x03, 0x4a, 0xdd, 0xc0, 0xf1, 0x2e, 0x6d, 0x2f, 0x12, 0x3c, 0x1a, 0xfa, 0xed, 0x70, 0x70, + 0x14, 0x87, 0xf0, 0xd1, 0x46, 0xe8, 0x1a, 0xd5, 0x4f, 0x6e, 0xe2, 0x58, 0xf8, 0x92, 0x80, 0x98, + 0x6f, 0xc4, 0x31, 0xe1, 0x78, 0x59, 0xf4, 0x36, 0xe4, 0x1c, 0x45, 0x73, 0x63, 0xa2, 0x96, 0xf2, + 0xd6, 0x28, 0x70, 0x08, 0x52, 0x34, 0x1b, 0x33, 0x28, 0xf9, 0xd7, 0xe1, 0xce, 0x43, 0x14, 0x8d, + 0x13, 0x99, 0x3d, 0x81, 0xf3, 0xfd, 0xc1, 0x66, 0x57, 0x57, 0x63, 0xa5, 0x84, 0x37, 0x2f, 0x09, + 0xe8, 0xf3, 0xeb, 0xa3, 0x59, 0xf1, 0x61, 0x38, 0xe8, 0x41, 0xc8, 0x48, 0x49, 0x3c, 0xfc, 0x96, + 0xd2, 0x23, 0x9d, 0xb6, 0xa2, 0xdd, 0xdd, 0x25, 0x86, 0x43, 0xf7, 0x62, 0xac, 0xa5, 0x3e, 0xc8, + 0xb9, 0xa7, 0x58, 0x66, 0xa9, 0xb6, 0x32, 0x8e, 0x8d, 0xf3, 0x4d, 0x1e, 0xe9, 0x7c, 0xdb, 0xa4, + 0x76, 0xf8, 0x44, 0xe8, 0xae, 0x6b, 0x05, 0x40, 0xbc, 0xc7, 0xe9, 0xa6, 0x21, 0xee, 0x0f, 0x3c, + 0xed, 0xf7, 0x3c, 0x0a, 0x0e, 0x70, 0x0d, 0x6d, 0x9b, 0xc2, 0x09, 0x6f, 0x9b, 0xed, 0x98, 0xc3, + 0xf6, 0xf5, 0x64, 0xcb, 0x66, 0xde, 0x4b, 0x7e, 0xd6, 0xf6, 0x52, 0x52, 0xfe, 0xb9, 0x74, 0xf0, + 0x7f, 0x0d, 0xa7, 0xd6, 0xb6, 0xa2, 0x8d, 0xa1, 0x48, 0x3c, 0x08, 0x17, 0x89, 0xe5, 0x74, 0x45, + 0xa2, 0xad, 0x68, 0x23, 0xea, 0xc4, 0xe7, 0x19, 0x28, 0x32, 0xc6, 0xf1, 0x04, 0x79, 0x2b, 0x74, + 0x0a, 0x49, 0x1d, 0xe5, 0xc5, 0xc8, 0xc1, 0xe3, 0x3b, 0xc7, 0x38, 0x70, 0x0e, 0xa7, 0x00, 0x38, + 0xec, 0x5e, 0x3a, 0xf7, 0xdf, 0xde, 0x4b, 0xcb, 0x7f, 0x92, 0xa0, 0xe4, 0x9a, 0x78, 0x0c, 0x91, + 0xb2, 0x1e, 0x8e, 0x94, 0x57, 0x92, 0xce, 0x7c, 0x74, 0x8c, 0xfc, 0x4b, 0x82, 0xb9, 0x21, 0xab, + 0xb9, 0x95, 0x59, 0x1a, 0x71, 0xdd, 0x7e, 0x8c, 0x69, 0xb8, 0xf0, 0xf1, 0xd3, 0x88, 0x24, 0x8c, + 0xec, 0xc9, 0x25, 0x0c, 0xf9, 0xfd, 0x2c, 0x9c, 0x8d, 0x3b, 0xf5, 0x3d, 0xaf, 0xd7, 0xac, 0xe8, + 0x5b, 0x54, 0x66, 0xdc, 0x6f, 0x51, 0xb9, 0xff, 0xd9, 0x5b, 0x54, 0x36, 0xe5, 0x5b, 0xd4, 0xfb, + 0x19, 0x38, 0x17, 0x7f, 0x96, 0x3c, 0xa1, 0x07, 0x29, 0xff, 0x14, 0x9a, 0x79, 0xfe, 0xa7, 0x50, + 0x74, 0x13, 0x66, 0x94, 0x0e, 0x0f, 0x33, 0xa5, 0x4b, 0x3b, 0x0e, 0x16, 0xc7, 0x93, 0x75, 0x74, + 0xb0, 0xbf, 0x38, 0x73, 0x3b, 0x44, 0xc1, 0x11, 0x4e, 0xf9, 0xb7, 0x12, 0xc0, 0x06, 0x51, 0x2d, + 0xe2, 0x8c, 0x21, 0x8b, 0xdc, 0x0a, 0x6f, 0xdf, 0x4a, 0x5c, 0xa8, 0xf3, 0xc9, 0x8c, 0x48, 0x1a, + 0x9f, 0x66, 0x01, 0x0d, 0xdf, 0x8b, 0xa3, 0x9b, 0xe2, 0xae, 0x9e, 0xa7, 0x8d, 0x2b, 0xc1, 0xbb, + 0xfa, 0x67, 0xfb, 0x8b, 0xe7, 0x86, 0x25, 0x02, 0xb7, 0xf8, 0x6b, 0x9e, 0xc3, 0xf9, 0x4d, 0xff, + 0xf5, 0xb0, 0x0b, 0x9f, 0xed, 0x2f, 0xc6, 0x7c, 0x37, 0x55, 0xf5, 0x90, 0x22, 0x8e, 0xd6, 0x60, + 0xba, 0xab, 0xd8, 0xce, 0xba, 0x65, 0x6e, 0x92, 0xb6, 0x2e, 0xbe, 0x18, 0x4a, 0x77, 0x97, 0xed, + 0xdd, 0xd6, 0xaf, 0x05, 0x81, 0x70, 0x18, 0x17, 0xed, 0x02, 0xa2, 0x03, 0x6d, 0x4b, 0x31, 0x6c, + 0xbe, 0x24, 0xaa, 0x2d, 0x97, 0x5a, 0x5b, 0x45, 0x68, 0x43, 0x6b, 0x43, 0x68, 0x38, 0x46, 0x03, + 0xba, 0x02, 0x05, 0x8b, 0x28, 0xb6, 0x69, 0x88, 0xb7, 0x05, 0x2f, 0x26, 0x31, 0x1b, 0xc5, 0x82, + 0x8a, 0x5e, 0x86, 0x89, 0x1e, 0xb1, 0x6d, 0x5a, 0xec, 0x22, 0xcf, 0x3b, 0x2d, 0x3e, 0x8c, 0x5d, + 0xba, 0xfc, 0x9e, 0x04, 0xbe, 0x8b, 0x58, 0x1f, 0xa9, 0xab, 0x77, 0xf9, 0x9b, 0xc4, 0x2a, 0x94, + 0x4c, 0x4b, 0x53, 0x0c, 0xfd, 0x31, 0x6f, 0x3a, 0xa5, 0xf0, 0xd3, 0xd3, 0xfd, 0x00, 0x0d, 0x87, + 0x38, 0x69, 0xb3, 0xaa, 0x9a, 0xbd, 0x9e, 0x69, 0xd0, 0x1a, 0x23, 0x5c, 0x1b, 0xc8, 0xd0, 0x2e, + 0x05, 0x07, 0xb8, 0xe4, 0x0f, 0x25, 0x98, 0x8d, 0xdc, 0xfe, 0xa3, 0x5f, 0x4a, 0x70, 0xce, 0x8e, + 0x9d, 0x9c, 0xd8, 0x1f, 0x37, 0xd2, 0x5c, 0xfa, 0x87, 0x00, 0xea, 0x0b, 0x62, 0x3e, 0x23, 0x56, + 0x8f, 
0x47, 0x28, 0x96, 0xff, 0x2e, 0xc1, 0xe9, 0xe8, 0x3b, 0xc2, 0xff, 0xe3, 0x44, 0xd1, 0xeb, + 0x30, 0xc5, 0x4f, 0x5a, 0xdf, 0x22, 0x7b, 0xcd, 0x86, 0xf0, 0xc2, 0x19, 0x01, 0x36, 0xb5, 0xee, + 0x93, 0x70, 0x90, 0x4f, 0xfe, 0x79, 0x06, 0x8a, 0x6e, 0x7d, 0x45, 0xdf, 0xf6, 0xdf, 0x85, 0xa4, + 0xd4, 0xd1, 0xed, 0x05, 0xdd, 0xd0, 0xdb, 0xd0, 0xf3, 0xff, 0x10, 0xee, 0x92, 0xdb, 0xdc, 0xf1, + 0x83, 0x68, 0xfc, 0xcd, 0x43, 0xf8, 0x0c, 0x95, 0x4b, 0x72, 0x86, 0x92, 0x3f, 0xc8, 0xc2, 0xdc, + 0x50, 0xbb, 0x81, 0x6e, 0x84, 0x72, 0xde, 0xe5, 0x48, 0xce, 0x9b, 0x1f, 0x12, 0x38, 0xb1, 0x94, + 0x17, 0x9f, 0x89, 0xb2, 0x63, 0xcc, 0x44, 0xb9, 0xa4, 0x99, 0x28, 0x7f, 0x78, 0x26, 0x8a, 0x78, + 0xa7, 0x90, 0xc8, 0x3b, 0x1f, 0x49, 0x30, 0x1b, 0x69, 0xa0, 0xd0, 0x35, 0x28, 0xea, 0x86, 0x4d, + 0xd4, 0x81, 0x45, 0xc4, 0xf3, 0x81, 0x57, 0x15, 0x9b, 0x62, 0x1c, 0x7b, 0x1c, 0xa8, 0x06, 0x93, + 0xb6, 0xba, 0x4d, 0x3a, 0x83, 0x2e, 0xe9, 0x30, 0x8f, 0x14, 0xfd, 0xa7, 0xfc, 0x0d, 0x97, 0x80, + 0x7d, 0x1e, 0xd4, 0x00, 0xe0, 0xbd, 0x58, 0xcb, 0xec, 0xb8, 0xe1, 0xe6, 0x7e, 0xff, 0x06, 0x4d, + 0x8f, 0xf2, 0x6c, 0x7f, 0x71, 0xc6, 0xff, 0xc7, 0xfc, 0x1f, 0x90, 0x93, 0xff, 0x9d, 0x83, 0x52, + 0xb0, 0x11, 0x4b, 0xf0, 0x85, 0xc9, 0x3b, 0x30, 0xa5, 0x18, 0x86, 0xe9, 0x28, 0xbc, 0x5b, 0xce, + 0x24, 0xbe, 0x15, 0x0e, 0xea, 0xa9, 0xde, 0xf6, 0x21, 0xf8, 0xad, 0xb0, 0x97, 0x11, 0x02, 0x14, + 0x1c, 0xd4, 0x84, 0x6e, 0x8b, 0x16, 0x39, 0x9b, 0xbc, 0x45, 0x2e, 0x46, 0xda, 0xe3, 0x1a, 0x4c, + 0x7a, 0x9d, 0xa4, 0xf8, 0x78, 0xc9, 0xb3, 0xb2, 0xbf, 0xb5, 0x7d, 0x1e, 0x54, 0x0d, 0x05, 0x43, + 0x9e, 0x05, 0xc3, 0xcc, 0x21, 0x57, 0x1d, 0xd1, 0xfe, 0xbb, 0x30, 0xee, 0xfe, 0x7b, 0x62, 0x2c, + 0xfd, 0x77, 0xe5, 0xeb, 0x70, 0x3a, 0xea, 0xc1, 0x54, 0xef, 0xd2, 0xeb, 0x80, 0x86, 0xf5, 0x1f, + 0xd5, 0xc2, 0x0d, 0x4b, 0xf8, 0xf9, 0xac, 0x7e, 0xef, 0xc9, 0xd3, 0x85, 0x53, 0x9f, 0x3c, 0x5d, + 0x38, 0xf5, 0xd9, 0xd3, 0x85, 0x53, 0x3f, 0x39, 0x58, 0x90, 0x9e, 0x1c, 0x2c, 0x48, 0x9f, 0x1c, + 0x2c, 0x48, 0x9f, 0x1d, 0x2c, 0x48, 0x9f, 0x1f, 0x2c, 0x48, 0xbf, 0xf8, 0xc7, 0xc2, 0xa9, 0x87, + 0x17, 0x8f, 0xfc, 0x06, 0xff, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xa8, 0x38, 0xe0, 0xa7, + 0x2f, 0x00, 0x00, +} + +func (m *DockerImageReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerImageReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerImageReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Registry) + copy(dAtA[i:], m.Registry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DockerImageManifests) > 0 { + for iNdEx := len(m.DockerImageManifests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DockerImageManifests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + i -= len(m.DockerImageConfig) + copy(dAtA[i:], m.DockerImageConfig) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageConfig))) + i-- + dAtA[i] = 0x52 + i -= len(m.DockerImageManifestMediaType) + copy(dAtA[i:], m.DockerImageManifestMediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifestMediaType))) + i-- + dAtA[i] = 0x4a + if len(m.DockerImageSignatures) > 0 { + for iNdEx := len(m.DockerImageSignatures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DockerImageSignatures[iNdEx]) + copy(dAtA[i:], m.DockerImageSignatures[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageSignatures[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Signatures) > 0 { + for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.DockerImageLayers) > 0 { + for iNdEx := len(m.DockerImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DockerImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.DockerImageManifest) + copy(dAtA[i:], m.DockerImageManifest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifest))) + i-- + dAtA[i] = 0x2a + i -= len(m.DockerImageMetadataVersion) + copy(dAtA[i:], m.DockerImageMetadataVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageMetadataVersion))) + i-- + dAtA[i] = 0x22 + { + size, err := m.DockerImageMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.DockerImageReference) + copy(dAtA[i:], m.DockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageBlobReferences) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Manifests) > 0 { + for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Manifests[iNdEx]) + copy(dAtA[i:], m.Manifests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifests[iNdEx]))) + i-- + dAtA[i] = 
0x22 + } + } + i-- + if m.ImageMissing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.Config != nil { + i -= len(*m.Config) + copy(dAtA[i:], *m.Config) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Config))) + i-- + dAtA[i] = 0x12 + } + if len(m.Layers) > 0 { + for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Layers[iNdEx]) + copy(dAtA[i:], m.Layers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Layers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageImportSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + i-- + if m.IncludeManifest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.To != nil { + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageImportStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageImportStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Manifests) > 0 { + for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Manifests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x1a + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLayer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.LayerSize)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLayerData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLayerData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0x12 + if m.LayerSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ImageList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLookupPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLookupPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLookupPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Local { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} + +func (m *ImageManifest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageManifest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageManifest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Variant) + copy(dAtA[i:], m.Variant) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Variant))) + i-- + dAtA[i] = 0x32 + i -= len(m.OS) + copy(dAtA[i:], m.OS) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.OS))) + i-- + dAtA[i] = 0x2a + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ManifestSize)) + i-- + dAtA[i] = 0x18 + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IssuedTo != nil { + { + size, err := m.IssuedTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.IssuedBy != nil { + { + size, err := m.IssuedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Created != nil { + { + size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.SignedClaims) > 0 { + keysForSignedClaims := make([]string, 0, len(m.SignedClaims)) + for k := range m.SignedClaims { + keysForSignedClaims = append(keysForSignedClaims, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) + for iNdEx := len(keysForSignedClaims) - 1; iNdEx >= 0; iNdEx-- { + v := m.SignedClaims[string(keysForSignedClaims[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSignedClaims[iNdEx]) + copy(dAtA[i:], keysForSignedClaims[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSignedClaims[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ImageIdentity) + copy(dAtA[i:], m.ImageIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageIdentity))) + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Content != nil { + i -= len(m.Content) + copy(dAtA[i:], m.Content) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Content))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStream) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStream) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamImage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamImport) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImport) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImport) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamImportSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImportSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + 
size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Repository != nil { + { + size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Import { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ImageStreamImportStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Repository != nil { + { + size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Import != nil { + { + size, err := m.Import.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamLayers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + keysForImages := make([]string, 0, len(m.Images)) + for k := range m.Images { + keysForImages = append(keysForImages, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + for iNdEx := len(keysForImages) - 1; iNdEx >= 0; iNdEx-- { + v := m.Images[string(keysForImages[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForImages[iNdEx]) + copy(dAtA[i:], keysForImages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForImages[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Blobs) > 0 { + keysForBlobs := make([]string, 0, len(m.Blobs)) + for k := range m.Blobs { + keysForBlobs = append(keysForBlobs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + for iNdEx := len(keysForBlobs) - 1; iNdEx >= 0; iNdEx-- { + v := m.Blobs[string(keysForBlobs[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + i -= len(keysForBlobs[iNdEx]) + copy(dAtA[i:], keysForBlobs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBlobs[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.DockerImageRepository) + copy(dAtA[i:], m.DockerImageRepository) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.DockerImageRepository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PublicDockerImageRepository) + copy(dAtA[i:], m.PublicDockerImageRepository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicDockerImageRepository))) + i-- + dAtA[i] = 0x1a + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.DockerImageRepository) + copy(dAtA[i:], m.DockerImageRepository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamTag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamTag) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x18 + if m.Tag != nil { + { + size, err := m.Tag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamTagList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageTag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageTag) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageTag) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Spec != nil { + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageTagList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageTagList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamedTagEventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedTagEventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedTagEventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; 
iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RepositoryImportSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RepositoryImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i-- + if m.IncludeManifest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + { + size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RepositoryImportStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RepositoryImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AdditionalTags) > 0 { + for iNdEx := len(m.AdditionalTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdditionalTags[iNdEx]) + copy(dAtA[i:], m.AdditionalTags[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalTags[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureGenericEntity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureGenericEntity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureGenericEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CommonName) + copy(dAtA[i:], m.CommonName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommonName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Organization) + copy(dAtA[i:], m.Organization) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureIssuer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureIssuer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PublicKeyID) + copy(dAtA[i:], m.PublicKeyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicKeyID))) + i-- + dAtA[i] = 0x12 + { + size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x20 + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x1a + i -= len(m.DockerImageReference) + copy(dAtA[i:], m.DockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagEventCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagEventCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x30 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagImportPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagImportPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagImportPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ImportMode) + copy(dAtA[i:], m.ImportMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImportMode))) + 
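+	// Hand-written annotation, not gogo/protobuf output: MarshalToSizedBuffer
+	// fills dAtA back to front, so each field appears in reverse order: payload
+	// first, then (for length-delimited fields) a varint length, then the tag
+	// byte written just below. A tag byte encodes (field_number<<3)|wire_type:
+	// 0x1a = field 3, wire type 2 (the ImportMode string above), 0x10 = field 2,
+	// wire type 0 (the Scheduled bool), and 0x8 = field 1, wire type 0 (Insecure).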
i-- + dAtA[i] = 0x1a + i-- + if m.Scheduled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *TagReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if m.Generation != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Generation)) + i-- + dAtA[i] = 0x28 + } + i-- + if m.Reference { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagReferencePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DockerImageReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Image) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DockerImageMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageMetadataVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageManifest) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DockerImageLayers) > 0 { + for _, e := range m.DockerImageLayers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Signatures) > 0 { + for _, e := range m.Signatures { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.DockerImageSignatures) > 0 { + for _, b := range m.DockerImageSignatures { + l = len(b) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.DockerImageManifestMediaType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageConfig) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DockerImageManifests) > 0 { + for _, e := range m.DockerImageManifests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageBlobReferences) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Layers) > 0 { + for _, s := range m.Layers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Config != nil { + l = len(*m.Config) + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Manifests) > 0 { + for _, s := range m.Manifests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Manifests) > 0 { + for _, e := range m.Manifests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageLayer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.LayerSize)) + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageLayerData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LayerSize != nil { + n += 1 + sovGenerated(uint64(*m.LayerSize)) + } + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageLookupPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n 
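+	// Hand-written annotation, not gogo/protobuf output: the bare `n += 2` above
+	// covers the Local bool field: one tag byte plus one byte for its 0/1 value.
+	// Length-delimited fields instead use the pattern
+	// n += 1 + l + sovGenerated(uint64(l)): one tag byte, the payload length l,
+	// and sovGenerated bytes for the varint-encoded length itself, where
+	// sovGenerated(x) = (bits.Len64(x|1)+6)/7 (see its definition further down).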
+} + +func (m *ImageManifest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ManifestSize)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OS) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Variant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Content != nil { + l = len(m.Content) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ImageIdentity) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.SignedClaims) > 0 { + for k, v := range m.SignedClaims { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Created != nil { + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedBy != nil { + l = m.IssuedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedTo != nil { + l = m.IssuedTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImport) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Import != nil { + l = m.Import.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamLayers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Blobs) > 0 { + for k, v := range m.Blobs { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Images) > 0 { + for k, v := range 
m.Images { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageStreamList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PublicDockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Tag != nil { + l = m.Tag.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Generation)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamedTagEventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*RepositoryImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RepositoryImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AdditionalTags) > 0 { + for _, s := range m.AdditionalTags { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SignatureCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureGenericEntity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Organization) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommonName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureIssuer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PublicKeyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagEventCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagImportPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + l = len(m.ImportMode) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += 
mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.Generation != nil { + n += 1 + sovGenerated(uint64(*m.Generation)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReferencePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DockerImageReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DockerImageReference{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + repeatedStringForDockerImageLayers := "[]ImageLayer{" + for _, f := range this.DockerImageLayers { + repeatedStringForDockerImageLayers += strings.Replace(strings.Replace(f.String(), "ImageLayer", "ImageLayer", 1), `&`, ``, 1) + "," + } + repeatedStringForDockerImageLayers += "}" + repeatedStringForSignatures := "[]ImageSignature{" + for _, f := range this.Signatures { + repeatedStringForSignatures += strings.Replace(strings.Replace(f.String(), "ImageSignature", "ImageSignature", 1), `&`, ``, 1) + "," + } + repeatedStringForSignatures += "}" + repeatedStringForDockerImageManifests := "[]ImageManifest{" + for _, f := range this.DockerImageManifests { + repeatedStringForDockerImageManifests += strings.Replace(strings.Replace(f.String(), "ImageManifest", "ImageManifest", 1), `&`, ``, 1) + "," + } + repeatedStringForDockerImageManifests += "}" + s := strings.Join([]string{`&Image{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `DockerImageMetadata:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DockerImageMetadata), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DockerImageMetadataVersion:` + fmt.Sprintf("%v", this.DockerImageMetadataVersion) + `,`, + `DockerImageManifest:` + fmt.Sprintf("%v", this.DockerImageManifest) + `,`, + `DockerImageLayers:` + repeatedStringForDockerImageLayers + `,`, + `Signatures:` + repeatedStringForSignatures + `,`, + `DockerImageSignatures:` + fmt.Sprintf("%v", this.DockerImageSignatures) + `,`, + `DockerImageManifestMediaType:` + fmt.Sprintf("%v", this.DockerImageManifestMediaType) + `,`, + `DockerImageConfig:` + fmt.Sprintf("%v", this.DockerImageConfig) + `,`, + `DockerImageManifests:` + repeatedStringForDockerImageManifests + `,`, + `}`, + }, "") + return s +} +func (this *ImageBlobReferences) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageBlobReferences{`, + `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`, + `Config:` + valueToStringGenerated(this.Config) + `,`, + `ImageMissing:` + fmt.Sprintf("%v", this.ImageMissing) + `,`, + `Manifests:` + fmt.Sprintf("%v", this.Manifests) + `,`, + 
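+		// Hand-written annotation, not gogo/protobuf output: optional (pointer)
+		// fields such as Config are printed via valueToStringGenerated, which
+		// yields "nil" for nil pointers and "*<value>" otherwise; plain value
+		// fields go through fmt.Sprintf("%v").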
`}`, + }, "") + return s +} +func (this *ImageImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForManifests := "[]Image{" + for _, f := range this.Manifests { + repeatedStringForManifests += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForManifests += "}" + s := strings.Join([]string{`&ImageImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Manifests:` + repeatedStringForManifests + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `LayerSize:` + fmt.Sprintf("%v", this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayerData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayerData{`, + `LayerSize:` + valueToStringGenerated(this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Image{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageLookupPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLookupPolicy{`, + `Local:` + fmt.Sprintf("%v", this.Local) + `,`, + `}`, + }, "") + return s +} +func (this *ImageManifest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageManifest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `ManifestSize:` + fmt.Sprintf("%v", this.ManifestSize) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSignature) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]SignatureCondition{" + for _, f := range 
this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "SignatureCondition", "SignatureCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForSignedClaims := make([]string, 0, len(this.SignedClaims)) + for k := range this.SignedClaims { + keysForSignedClaims = append(keysForSignedClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) + mapStringForSignedClaims := "map[string]string{" + for _, k := range keysForSignedClaims { + mapStringForSignedClaims += fmt.Sprintf("%v: %v,", k, this.SignedClaims[k]) + } + mapStringForSignedClaims += "}" + s := strings.Join([]string{`&ImageSignature{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Content:` + valueToStringGenerated(this.Content) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ImageIdentity:` + fmt.Sprintf("%v", this.ImageIdentity) + `,`, + `SignedClaims:` + mapStringForSignedClaims + `,`, + `Created:` + strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1) + `,`, + `IssuedBy:` + strings.Replace(this.IssuedBy.String(), "SignatureIssuer", "SignatureIssuer", 1) + `,`, + `IssuedTo:` + strings.Replace(this.IssuedTo.String(), "SignatureSubject", "SignatureSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStream) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStream{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamSpec", "ImageStreamSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamStatus", "ImageStreamStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImage{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImport) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImport{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamImportSpec", "ImageStreamImportSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamImportStatus", "ImageStreamImportStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportSpec{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportSpec", "ImageImportSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportSpec{`, + `Import:` + fmt.Sprintf("%v", this.Import) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportSpec", 
"RepositoryImportSpec", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportStatus{`, + `Import:` + strings.Replace(this.Import.String(), "ImageStream", "ImageStream", 1) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportStatus", "RepositoryImportStatus", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamLayers) String() string { + if this == nil { + return "nil" + } + keysForBlobs := make([]string, 0, len(this.Blobs)) + for k := range this.Blobs { + keysForBlobs = append(keysForBlobs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + mapStringForBlobs := "map[string]ImageLayerData{" + for _, k := range keysForBlobs { + mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k]) + } + mapStringForBlobs += "}" + keysForImages := make([]string, 0, len(this.Images)) + for k := range this.Images { + keysForImages = append(keysForImages, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + mapStringForImages := "map[string]ImageBlobReferences{" + for _, k := range keysForImages { + mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k]) + } + mapStringForImages += "}" + s := strings.Join([]string{`&ImageStreamLayers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Blobs:` + mapStringForBlobs + `,`, + `Images:` + mapStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStream{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStream", "ImageStream", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]TagReference{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "TagReference", "TagReference", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamSpec{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `LookupPolicy:` + 
strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]NamedTagEventList{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "NamedTagEventList", "NamedTagEventList", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamStatus{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `PublicDockerImageRepository:` + fmt.Sprintf("%v", this.PublicDockerImageRepository) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTag) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ImageStreamTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Tag:` + strings.Replace(this.Tag.String(), "TagReference", "TagReference", 1) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStreamTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStreamTag", "ImageStreamTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageTag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(this.Spec.String(), "TagReference", "TagReference", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "NamedTagEventList", "NamedTagEventList", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageTag", "ImageTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func 
(this *NamedTagEventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]TagEvent{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TagEvent", "TagEvent", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamedTagEventList{`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepositoryImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&RepositoryImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `AdditionalTags:` + fmt.Sprintf("%v", this.AdditionalTags) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SignatureCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureGenericEntity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureGenericEntity{`, + `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`, + `CommonName:` + 
fmt.Sprintf("%v", this.CommonName) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureIssuer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureIssuer{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureSubject{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `PublicKeyID:` + fmt.Sprintf("%v", this.PublicKeyID) + `,`, + `}`, + }, "") + return s +} +func (this *TagEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEvent{`, + `Created:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagEventCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEventCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagImportPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImportPolicy{`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `Scheduled:` + fmt.Sprintf("%v", this.Scheduled) + `,`, + `ImportMode:` + fmt.Sprintf("%v", this.ImportMode) + `,`, + `}`, + }, "") + return s +} +func (this *TagReference) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&TagReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `Generation:` + valueToStringGenerated(this.Generation) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagReferencePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagReferencePolicy{`, 
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DockerImageReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerImageReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerImageReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DockerImageMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadataVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageMetadataVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageLayers = append(m.DockerImageLayers, ImageLayer{}) + if err := m.DockerImageLayers[len(m.DockerImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signatures = append(m.Signatures, ImageSignature{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageSignatures", 
wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageSignatures = append(m.DockerImageSignatures, make([]byte, postIndex-iNdEx)) + copy(m.DockerImageSignatures[len(m.DockerImageSignatures)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifestMediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifestMediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifests = append(m.DockerImageManifests, ImageManifest{}) + if err := m.DockerImageManifests[len(m.DockerImageManifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageBlobReferences) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Config = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageMissing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ImageMissing = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &v11.LocalObjectReference{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, Image{}) + if err := m.Manifests[len(m.Manifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLayer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + m.LayerSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ImageLayerData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LayerSize = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Image{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLookupPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLookupPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLookupPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Local = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageManifest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageManifest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageManifest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated 
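+ // Note on the guard above: stringLen is decoded as a uint64, so a conversion to int
+ // that comes out negative means the varint overflowed the platform int (notably any
+ // length >= 2^31 on 32-bit builds); rejecting it here keeps postIndex below from wrapping.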
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManifestSize", wireType) + } + m.ManifestSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ManifestSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSignature) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Content = append(m.Content[:0], dAtA[iNdEx:postIndex]...) 
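+ // Content is a bytes field: appending into m.Content[:0] copies the payload out of
+ // dAtA (which the caller may reuse or mutate) while retaining any previously allocated
+ // backing array. The nil check that follows normalizes a present-but-empty field to []byte{}.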
+ if m.Content == nil { + m.Content = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, SignatureCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageIdentity = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedClaims == nil { + m.SignedClaims = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SignedClaims[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Created == nil { + m.Created = &v1.Time{} + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IssuedBy == nil { + m.IssuedBy = &SignatureIssuer{} + } + if err := m.IssuedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IssuedTo == nil { + m.IssuedTo = &SignatureSubject{} + } + if err := m.IssuedTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImport) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Import = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repository == nil { + m.Repository = &RepositoryImportSpec{} + } + if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportSpec{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Import == nil { + m.Import = &ImageStream{} + } + if err := m.Import.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repository == nil { + m.Repository = &RepositoryImportStatus{} + } + if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: 
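+ // Field numbers this decoder does not recognize fall through to the default arm:
+ // skipGenerated derives the encoded size of the unknown field from its wire type so
+ // the decoder can step over it, which keeps older clients compatible with newer payloads.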
+ iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blobs == nil { + m.Blobs = make(map[string]ImageLayerData) + } + var mapkey string + mapvalue := &ImageLayerData{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ImageLayerData{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Blobs[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Images == nil { + m.Images = make(map[string]ImageBlobReferences) + } + var mapkey string + mapvalue := &ImageBlobReferences{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ImageBlobReferences{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } 
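+ // Proto map fields arrive on the wire as repeated nested entry messages with the key
+ // in field 1 and the value in field 2; each pass of the enclosing loop consumes one
+ // entry, and the decoded pair is committed to m.Images[mapkey] just after the loop.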
+ } + m.Images[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageStream{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
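+ // Each key decoded above is a tag of the form (fieldNum << 3) | wireType; embedded
+ // messages such as ObjectMeta must be wire type 2 (length-delimited), which the
+ // check below enforces before the nested Unmarshal runs on the delimited slice.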
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, TagReference{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, NamedTagEventList{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicDockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicDockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tag == nil { + m.Tag = &TagReference{} + } + if err := m.Tag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ImageStreamTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageStreamTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &TagReference{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &NamedTagEventList{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedTagEventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedTagEventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedTagEventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, TagEvent{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.AdditionalTags = append(m.AdditionalTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, v11.Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SignatureConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureGenericEntity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureGenericEntity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureGenericEntity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Organization = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureIssuer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureIssuer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureIssuer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicKeyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEventCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEventCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEventCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagEventConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImportPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImportPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImportPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Insecure = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Scheduled = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportMode = ImportModeType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := 
int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Reference = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Generation = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReferencePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReferencePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReferencePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagReferencePolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 
8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto new file mode 100644 index 0000000000000..dabdc6d84a5aa --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -0,0 +1,746 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.image.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/image/v1"; + +// DockerImageReference points to a container image. +message DockerImageReference { + // Registry is the registry that contains the container image + optional string registry = 1; + + // Namespace is the namespace that contains the container image + optional string namespace = 2; + + // Name is the name of the container image + optional string name = 3; + + // Tag is which tag of the container image is being referenced + optional string tag = 4; + + // ID is the identifier for the container image + optional string iD = 5; +} + +// Image is an immutable representation of a container image and metadata at a point in time. +// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users instead access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Image { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // dockerImageReference is the string that can be used to pull this image. 
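+ // For illustration (hypothetical values), such a pull reference might look like + // "registry.example.com/myproject/app@sha256:4f8c..." - the registry, namespace, name, + // and ID components described by the DockerImageReference message above.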
+ optional string dockerImageReference = 2; + + // dockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + optional .k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; + + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + optional string dockerImageMetadataVersion = 4; + + // dockerImageManifest is the raw JSON of the manifest + optional string dockerImageManifest = 5; + + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + repeated ImageLayer dockerImageLayers = 6; + + // signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + repeated ImageSignature signatures = 7; + + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + repeated bytes dockerImageSignatures = 8; + + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + optional string dockerImageManifestMediaType = 9; + + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // Will not be set when the image represents a manifest list. + optional string dockerImageConfig = 10; + + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // When this field is present, no DockerImageLayers should be specified. + repeated ImageManifest dockerImageManifests = 11; +} + +// ImageBlobReferences describes the blob references within an image. +message ImageBlobReferences { + // imageMissing is true if the image is referenced by the image stream but the image + // object has been deleted from the API by an administrator. When this field is set, + // layers and config fields may be empty and callers that depend on the image metadata + // should consider the image to be unavailable for download or viewing. + // +optional + optional bool imageMissing = 3; + + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + repeated string layers = 1; + + // config, if set, is the blob that contains the image config. Some images do + // not have separate config blobs and this field will be set to nil if so. + // +optional + optional string config = 2; + + // manifests is the list of other image names that this image points + // to. For a single architecture image, it is empty. For a multi-arch + // image, it consists of the digests of single architecture images; + // such images shouldn't have layers or config. + // +optional + repeated string manifests = 4; +} + +// ImageImportSpec describes a request to import a specific image.
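+// +// For illustration, a Go client could populate a minimal request as follows (the +// registry, image name, and the imagev1/corev1 aliases are hypothetical, not part of +// this API): +// +//	spec := imagev1.ImageImportSpec{ +//		From:         corev1.ObjectReference{Kind: "DockerImage", Name: "registry.example.com/myproject/app:latest"}, +//		ImportPolicy: imagev1.TagImportPolicy{Scheduled: true}, +//	}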
+message ImageImportSpec { + // from is the source of an image to import; only kind DockerImage is allowed + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // to is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used + optional .k8s.io.api.core.v1.LocalObjectReference to = 2; + + // importPolicy is the policy controlling how the image is imported + optional TagImportPolicy importPolicy = 3; + + // referencePolicy defines how other components should consume the image + optional TagReferencePolicy referencePolicy = 5; + + // includeManifest determines if the manifest for each image is returned in the response + optional bool includeManifest = 4; +} + +// ImageImportStatus describes the result of an image import. +message ImageImportStatus { + // status is the status of the image import, including errors encountered while retrieving the image + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + + // image is the metadata of that image, if the image was located + optional Image image = 2; + + // tag is the tag this image was located under, if any + optional string tag = 3; + + // manifests holds sub-manifests metadata when importing a manifest list + repeated Image manifests = 4; +} + +// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. +message ImageLayer { + // name of the layer as defined by the underlying store. + optional string name = 1; + + // size of the layer in bytes as defined by the underlying store. + optional int64 size = 2; + + // mediaType of the referenced object. + optional string mediaType = 3; +} + +// ImageLayerData contains metadata about an image layer. +message ImageLayerData { + // size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + optional int64 size = 1; + + // mediaType of the referenced object. + optional string mediaType = 2; +} + +// ImageList is a list of Image objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of images + repeated Image items = 2; +} + +// ImageLookupPolicy describes how an image stream can be used to override the image references +// used by pods, builds, and other resources in a namespace. +message ImageLookupPolicy { + // local will change the docker short image references (like "mysql" or + // "php:latest") on objects in this namespace to the image ID whenever they match + // this image stream, instead of reaching out to a remote registry. The name will + // be fully qualified to an image ID if found. The tag's referencePolicy is taken + // into account on the replaced value. Only works within the current namespace. + optional bool local = 3; +} + +// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular +// Image object. +message ImageManifest { + // digest is the unique identifier for the manifest. It refers to an Image object.
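+ // (For illustration: digests typically take the form "sha256:" followed by 64 hex + // characters, e.g. "sha256:4f8c..." - truncated, hypothetical value.)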
+ optional string digest = 1; + + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. + optional string mediaType = 2; + + // manifestSize represents the size of the raw object contents, in bytes. + optional int64 manifestSize = 3; + + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + optional string architecture = 4; + + // os specifies the operating system, for example `linux`. + optional string os = 5; + + // variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU + // variant of the ARM CPU. + optional string variant = 6; +} + +// ImageSignature holds a signature of an image. It allows verifying image identity and possibly other claims +// as long as the signature is trusted. Based on this information it is possible to restrict runnable images +// to those matching cluster-wide policy. +// Mandatory fields should be parsed by clients doing image verification. The others are parsed from +// signature's content by the server. They serve just an informative purpose. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageSignature { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required: Describes a type of stored blob. + optional string type = 2; + + // Required: An opaque binary string which is an image's signature. + optional bytes content = 3; + + // conditions represent the latest available observations of a signature's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated SignatureCondition conditions = 4; + + // A human readable string representing image's identity. It could be a product name and version, or an + // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). + optional string imageIdentity = 5; + + // Contains claims from the signature. + map<string, string> signedClaims = 6; + + // If specified, it is the time of signature's creation. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7; + + // If specified, it holds information about an issuer of signing certificate or key (a person or entity + // who signed the signing certificate or key). + optional SignatureIssuer issuedBy = 8; + + // If specified, it holds information about a subject of signing certificate or key (a person or entity + // who signed the image). + optional SignatureSubject issuedTo = 9; +} + +// An ImageStream stores a mapping of tags to images, metadata overrides that are applied +// when images are tagged in a stream, and an optional reference to a container image +// repository on a registry. Users typically update the spec.tags field to point to external +// images which are imported from container registries using credentials in your namespace +// with the pull secret type, or to existing image stream tags and images which are +// immediately accessible for tagging or pulling. The history of images applied to a tag +// is visible in the status.tags field and any user who can view an image stream is allowed +// to tag that image into their own image streams.
Access to pull images from the integrated +// registry is granted by having the "get imagestreams/layers" permission on a given image +// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both +// spec and status for that tag to be removed. Image stream history is retained until an +// administrator runs the prune operation, which removes references that are no longer in +// use. To preserve a historical image, ensure there is a tag in spec pointing to that image +// by its digest. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStream { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the desired state of this stream + // +optional + optional ImageStreamSpec spec = 2; + + // status describes the current state of this stream + // +optional + optional ImageStreamStatus status = 3; +} + +// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. +// User interfaces and regular users can use this resource to access the metadata details of +// a tagged image in the image stream history for viewing, since Image resources are not +// directly accessible to end users. A not found error will be returned if no such image is +// referenced by a tag within the ImageStream. Images are created when spec tags are set on +// an image stream that represent an image in an external registry, when pushing to the +// integrated registry, or when tagging an existing image from one image stream to another. +// The name of an image stream image is in the form "<name>@<id>", where the digest is +// the content addressable identifier for the image (sha256:xxxxx...). You can use +// ImageStreamImages as the from.kind of an image stream spec tag to reference an image +// exactly. The only operation supported on the imagestreamimage endpoint is retrieving +// the image. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamImage { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // image associated with the ImageStream and image name. + optional Image image = 2; +} + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +message ImageStreamImport { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is a description of the images that the user wishes to import + optional ImageStreamImportSpec spec = 2; + + // status is the result of importing the image + optional ImageStreamImportStatus status = 3; +} + +// ImageStreamImportSpec defines what images should be imported. +message ImageStreamImportSpec { + // import indicates whether to perform an import - if so, the specified tags are set on the spec + // and status of the image stream defined by the type meta. + optional bool import = 1; + + // repository is an optional import of an entire container image repository. A maximum limit on the + // number of tags imported this way is imposed by the server. + optional RepositoryImportSpec repository = 2; + + // images are a list of individual images to import. + repeated ImageImportSpec images = 3; +} + +// ImageStreamImportStatus contains information about the status of an image stream import. +message ImageStreamImportStatus { + // import is the image stream that was successfully updated or created when 'to' was set. + optional ImageStream import = 1; + + // repository is set if spec.repository was set to the outcome of the import + optional RepositoryImportStatus repository = 2; + + // images is set with the result of importing spec.images + repeated ImageImportStatus images = 3; +} + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamLayers { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // blobs is a map of blob name to metadata about the blob. + map<string, ImageLayerData> blobs = 2; + + // images is a map between an image name and the names of the blobs and config that + // comprise the image. + map<string, ImageBlobReferences> images = 3; +} + +// ImageStreamList is a list of ImageStream objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of imageStreams + repeated ImageStream items = 2; +} + +// ImageStreamMapping represents a mapping from a single image stream tag to a container +// image as well as the reference to the container image stream the image came from. This +// resource is used by privileged integrators to create an image resource and to associate +// it with an image stream in the status tags field. Creating an ImageStreamMapping will +// allow any user who can view the image stream to tag or pull that image, so only create +// mappings where the user has proven they have access to the image contents directly.
+// The only operation supported for this resource is create, and the metadata name and +// namespace should be set to the image stream containing the tag that should be updated. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamMapping { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // image is a container image. + optional Image image = 2; + + // tag is a string value this image can be located with inside the stream. + optional string tag = 3; +} + +// ImageStreamSpec represents options for ImageStreams. +message ImageStreamSpec { + // lookupPolicy controls how other resources reference images within this namespace. + optional ImageLookupPolicy lookupPolicy = 3; + + // dockerImageRepository is optional; if specified, this stream is backed by a container repository on this server + // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. + // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. + optional string dockerImageRepository = 1; + + // tags map arbitrary string values to specific image locators + // +patchMergeKey=name + // +patchStrategy=merge + repeated TagReference tags = 2; +} + +// ImageStreamStatus contains information about the state of this image stream. +message ImageStreamStatus { + // dockerImageRepository represents the effective location this stream may be accessed at. + // May be empty until the server determines where the repository is located + optional string dockerImageRepository = 1; + + // publicDockerImageRepository represents the public location from where the image can + // be pulled outside the cluster. This field may be empty if the administrator + // has not exposed the integrated registry externally. + optional string publicDockerImageRepository = 3; + + // tags are a historical record of images associated with each tag. The first entry in the + // TagEvent array is the currently tagged image. + // +patchMergeKey=tag + // +patchStrategy=merge + repeated NamedTagEventList tags = 2; +} + +// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. +// Use this resource to interact with the tags and images in an image stream by tag, or +// to see the image details for a particular tag. The image associated with this resource +// is the most recently successfully tagged, imported, or pushed image (as described in the +// image stream status.tags.items list for this tag). If an import is in progress or has +// failed, the previous image will be shown. Deleting an image stream tag clears both the +// status and spec fields of an image stream. If no image can be retrieved for a given tag, +// a not found error will be returned. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamTag { + // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference tag = 2; + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + optional int64 generation = 3; + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + optional ImageLookupPolicy lookupPolicy = 6; + + // conditions is an array of conditions that apply to the image stream tag. + repeated TagEventCondition conditions = 4; + + // image associated with the ImageStream and tag. + optional Image image = 5; +} + +// ImageStreamTagList is a list of ImageStreamTag objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamTagList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of image stream tags + repeated ImageStreamTag items = 2; +} + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageTag { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference spec = 2; + + // status is the status tag details associated with this image stream tag, and it + // may be null if no push or import has been performed. + optional NamedTagEventList status = 3; + + // image is the details of the most recent image stream status tag, and it may be + // null if import has not completed or an administrator has deleted the image + // object. To verify this is the most recent image, you must verify the generation + // of the most recent status.items entry matches the spec tag (if a spec tag is + // set). This field will not be set when listing image tags. + optional Image image = 4; +} + +// ImageTagList is a list of ImageTag objects. When listing image tags, the image +// field is not populated. 
Tags are returned in alphabetical order by image stream +// and then tag. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageTagList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of image stream tags + repeated ImageTag items = 2; +} + +// NamedTagEventList relates a tag to its image history. +message NamedTagEventList { + // tag is the tag for which the history is recorded + optional string tag = 1; + + // items is the historical record of images associated with this tag. The first entry + // in the array is the currently tagged image. + repeated TagEvent items = 2; + + // conditions is an array of conditions that apply to the tag event list. + repeated TagEventCondition conditions = 3; +} + +// RepositoryImportSpec describes a request to import images from a container image repository. +message RepositoryImportSpec { + // from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + optional .k8s.io.api.core.v1.ObjectReference from = 1; + + // importPolicy is the policy controlling how the image is imported + optional TagImportPolicy importPolicy = 2; + + // referencePolicy defines how other components should consume the image + optional TagReferencePolicy referencePolicy = 4; + + // includeManifest determines if the manifest for each image is returned in the response + optional bool includeManifest = 3; +} + +// RepositoryImportStatus describes the result of an image repository import +message RepositoryImportStatus { + // status reflects whether any failure occurred during import + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + + // images is a list of images successfully retrieved by the import of the repository. + repeated ImageImportStatus images = 2; + + // additionalTags are tags that exist in the repository but were not imported because + // a maximum limit of automatic imports was applied. + repeated string additionalTags = 3; +} + +// SecretList is a list of Secret. +// +openshift:compatibility-gen:level=1 +message SecretList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + repeated .k8s.io.api.core.v1.Secret items = 2; +} + +// SignatureCondition describes an image signature condition of a particular kind at a particular probe time. +message SignatureCondition { + // type of signature condition, Complete or Failed. + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about last transition.
+ optional string message = 6; +} + +// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject +// of a signing certificate or key. +message SignatureGenericEntity { + // organization name. + optional string organization = 1; + + // Common name (e.g. openshift-signing-service). + optional string commonName = 2; +} + +// SignatureIssuer holds information about an issuer of signing certificate or key. +message SignatureIssuer { + optional SignatureGenericEntity signatureGenericEntity = 1; +} + +// SignatureSubject holds information about a person or entity who created the signature. +message SignatureSubject { + optional SignatureGenericEntity signatureGenericEntity = 1; + + // If present, it is a human readable key id of public key belonging to the subject used to verify image + // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. + // 0x685ebe62bf278440). + optional string publicKeyID = 2; +} + +// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. +message TagEvent { + // created holds the time the TagEvent was created + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1; + + // dockerImageReference is the string that can be used to pull this image + optional string dockerImageReference = 2; + + // image is the image + optional string image = 3; + + // generation is the spec tag generation that resulted in this tag being updated + optional int64 generation = 4; +} + +// TagEventCondition contains condition information for a tag event. +message TagEventCondition { + // type of tag event condition, currently only ImportSuccess + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // lastTransitionTime is the time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a brief machine readable explanation for the condition's last transition. + optional string reason = 4; + + // message is a human readable description of the details about last transition, complementing reason. + optional string message = 5; + + // generation is the spec tag generation that this status corresponds to + optional int64 generation = 6; +} + +// TagImportPolicy controls how images related to this tag will be imported. +message TagImportPolicy { + // insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + optional bool insecure = 1; + + // scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + optional bool scheduled = 2; + + // importMode describes how to import an image manifest. + optional string importMode = 3; +} + +// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. +message TagReference { + // name of the tag + optional string name = 1; + + // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. + // +optional + map<string, string> annotations = 2; + + // Optional; if specified, a reference to another image that this tag should point to. Valid values + // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references + // can only reference a tag within this same ImageStream.
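+ // For example (illustrative values): kind ImageStreamTag with name "2.5" points at + // another tag of this stream, while kind DockerImage with name + // "docker.io/library/mysql:8.0" tracks an image in an external registry.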
+ optional .k8s.io.api.core.v1.ObjectReference from = 3; + + // reference states if the tag will be imported. Default value is false, which means the tag will + // be imported. + optional bool reference = 4; + + // generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // is changed, the generation is set to match the current stream generation (which is incremented every + // time spec is changed). Other processes in the system like the image importer observe that the + // generation of spec tag is newer than the generation recorded in the status and use that as a trigger + // to import the newest remote tag. To trigger a new import, clients may set this value to zero, which + // will reset the generation to the latest stream generation. Legacy clients will send this value as + // nil, which will be merged with the current tag generation. + // +optional + optional int64 generation = 5; + + // importPolicy is information that controls how images may be imported by the server. + optional TagImportPolicy importPolicy = 6; + + // referencePolicy defines how other components should consume the image. + optional TagReferencePolicy referencePolicy = 7; +} + +// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when +// image change triggers in deployment configs or builds are resolved. This allows the image stream +// author to control how images are accessed. +message TagReferencePolicy { + // type determines how the image pull spec should be transformed when the image stream tag is used in + // deployment config triggers or new builds. The default value is `Source`, indicating the original + // location of the image should be used (if imported). The user may also specify `Local`, indicating + // that the pull spec should point to the integrated container image registry and leverage the registry's + // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this + // image to be managed from the image stream's namespace, so others on the platform can access a remote + // image but have no access to the remote secret. It also allows the image layers to be mirrored into + // the local registry, from which the images can still be pulled even if the upstream registry is unavailable. + optional string type = 1; +} + diff --git a/vendor/github.com/openshift/api/image/v1/legacy.go b/vendor/github.com/openshift/api/image/v1/legacy.go new file mode 100644 index 0000000000000..02bbaa2906fed --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/legacy.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme.
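+// +// For illustration, callers typically wire these types in via the exported installer +// (sketch; the scheme variable is hypothetical): +// +//	scheme := runtime.NewScheme() +//	if err := DeprecatedInstallWithoutGroup(scheme); err != nil { +//		panic(err) +//	}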
+func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamImport{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/image/v1/register.go b/vendor/github.com/openshift/api/image/v1/register.go new file mode 100644 index 0000000000000..0d924103a642b --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/register.go @@ -0,0 +1,54 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" +) + +var ( + GroupName = "image.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamLayers{}, + &ImageStreamImport{}, + &ImageTag{}, + &ImageTagList{}, + &SecretList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go new file mode 100644 index 0000000000000..d4ee4bff69799 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/types.go @@ -0,0 +1,766 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageList is a list of Image objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of images + Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image is an immutable representation of a container image and metadata at a point in time. 
+// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users instead access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Image struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // dockerImageReference is the string that can be used to pull this image. + DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"` + // dockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"` + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"` + // dockerImageManifest is the raw JSON of the manifest + DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"` + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"` + // signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"` + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"` + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"` + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // Will not be set when the image represents a manifest list. + DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"` + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // When this field is present, no DockerImageLayers should be specified. 
+ DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"` +} + +// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular +// Image object. +type ImageManifest struct { + // digest is the unique identifier for the manifest. It refers to an Image object. + Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"` + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. + MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` + // manifestSize represents the size of the raw object contents, in bytes. + ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"` + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"` + // os specifies the operating system, for example `linux`. + OS string `json:"os" protobuf:"bytes,5,opt,name=os"` + // variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU + // variant of the ARM CPU. + Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"` +} + +// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. +type ImageLayer struct { + // name of the layer as defined by the underlying store. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // size of the layer in bytes as defined by the underlying store. + LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"` + // mediaType of the referenced object. + MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"` +} + +// +genclient +// +genclient:onlyVerbs=create,delete +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageSignature holds a signature of an image. It allows verifying image identity and possibly other claims +// as long as the signature is trusted. Based on this information it is possible to restrict runnable images +// to those matching cluster-wide policy. +// Mandatory fields should be parsed by clients doing image verification. The others are parsed from +// signature's content by the server. They serve just an informative purpose. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageSignature struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required: Describes a type of stored blob. + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // Required: An opaque binary string which is an image's signature. + Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"` + // conditions represent the latest available observations of a signature's current state.
+ // +patchMergeKey=type + // +patchStrategy=merge + Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + + // Following metadata fields will be set by server if the signature content is successfully parsed and + // the information is available. + + // A human readable string representing image's identity. It could be a product name and version, or an + // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). + ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"` + // Contains claims from the signature. + SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"` + // If specified, it is the time of signature's creation. + Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"` + // If specified, it holds information about an issuer of signing certificate or key (a person or entity + // who signed the signing certificate or key). + IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"` + // If specified, it holds information about a subject of signing certificate or key (a person or entity + // who signed the image). + IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"` +} + +// SignatureConditionType is a type of image signature condition. +type SignatureConditionType string + +// SignatureCondition describes an image signature condition of a particular kind at a particular probe time. +type SignatureCondition struct { + // type of signature condition, Complete or Failed. + Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"` + // status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject +// of a signing certificate or key. +type SignatureGenericEntity struct { + // organization name. + Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"` + // Common name (e.g. openshift-signing-service). + CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"` +} + +// SignatureIssuer holds information about an issuer of signing certificate or key. +type SignatureIssuer struct { + SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` +} + +// SignatureSubject holds information about a person or entity who created the signature.
+type SignatureSubject struct {
+ SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"`
+ // If present, it is a human readable key id of the public key belonging to the subject, used to verify the
+ // image signature. It should contain at least the lowest 64 bits of the public key's fingerprint (e.g.
+ // 0x685ebe62bf278440).
+ PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamList is a list of ImageStream objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of imageStreams.
+ Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=Secrets,verb=get,subresource=secrets,result=github.com/openshift/api/image/v1.SecretList
+// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// An ImageStream stores a mapping of tags to images, metadata overrides that are applied
+// when images are tagged in a stream, and an optional reference to a container image
+// repository on a registry. Users typically update the spec.tags field to point to external
+// images which are imported from container registries using credentials in their namespace
+// with the pull secret type, or to existing image stream tags and images which are
+// immediately accessible for tagging or pulling. The history of images applied to a tag
+// is visible in the status.tags field, and any user who can view an image stream is allowed
+// to tag that image into their own image streams. Access to pull images from the integrated
+// registry is granted by having the "get imagestreams/layers" permission on a given image
+// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both
+// spec and status for that tag to be removed. Image stream history is retained until an
+// administrator runs the prune operation, which removes references that are no longer in
+// use. To preserve a historical image, ensure there is a tag in spec pointing to that image
+// by its digest.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStream struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec describes the desired state of this stream.
+ // +optional
+ Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status describes the current state of this stream.
+ // +optional
+ Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ImageStreamSpec represents options for ImageStreams.
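+// A minimal illustrative sketch (the registry and names are hypothetical): a spec that
+// enables local lookup and tracks one external image under the "latest" tag:
+//
+//	spec := ImageStreamSpec{
+//		LookupPolicy: ImageLookupPolicy{Local: true},
+//		Tags: []TagReference{{
+//			Name: "latest",
+//			From: &corev1.ObjectReference{Kind: "DockerImage", Name: "quay.io/example/app:latest"},
+//		}},
+//	}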
+type ImageStreamSpec struct {
+ // lookupPolicy controls how other resources reference images within this namespace.
+ LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"`
+ // dockerImageRepository is optional; if specified, this stream is backed by a container repository on this server.
+ // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release.
+ // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.
+ DockerImageRepository string `json:"dockerImageRepository,omitempty" protobuf:"bytes,1,opt,name=dockerImageRepository"`
+ // tags map arbitrary string values to specific image locators.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Tags []TagReference `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tags"`
+}
+
+// ImageLookupPolicy describes how an image stream can be used to override the image references
+// used by pods, builds, and other resources in a namespace.
+type ImageLookupPolicy struct {
+ // local will change the docker short image references (like "mysql" or
+ // "php:latest") on objects in this namespace to the image ID whenever they match
+ // this image stream, instead of reaching out to a remote registry. The name will
+ // be fully qualified to an image ID if found. The tag's referencePolicy is taken
+ // into account on the replaced value. Only works within the current namespace.
+ Local bool `json:"local" protobuf:"varint,3,opt,name=local"`
+}
+
+// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
+type TagReference struct {
+ // name of the tag.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
+ // +optional
+ Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"`
+ // Optional; if specified, a reference to another image that this tag should point to. Valid values
+ // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
+ // can only reference a tag within this same ImageStream.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"`
+ // reference states if the tag will be imported. The default value is false, which means the tag will
+ // be imported.
+ Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"`
+ // generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+ // is changed the generation is set to match the current stream generation (which is incremented every
+ // time spec is changed). Other processes in the system like the image importer observe that the
+ // generation of spec tag is newer than the generation recorded in the status and use that as a trigger
+ // to import the newest remote tag. To trigger a new import, clients may set this value to zero, which
+ // will reset the generation to the latest stream generation. Legacy clients will send this value as
+ // nil, which will be merged with the current tag generation.
+ // +optional
+ Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"`
+ // importPolicy is information that controls how images may be imported by the server.
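+ // For example (illustrative only), to re-check an external tag periodically and keep
+ // multi-arch manifest lists intact on import:
+ //
+ //	ImportPolicy: TagImportPolicy{
+ //		Scheduled:  true,
+ //		ImportMode: ImportModePreserveOriginal,
+ //	}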
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"`
+ // referencePolicy defines how other components should consume the image.
+ ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"`
+}
+
+// TagImportPolicy controls how images related to this tag will be imported.
+type TagImportPolicy struct {
+ // insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+ Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"`
+ // scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and re-imported when it is not.
+ Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"`
+ // importMode describes how to import an image manifest.
+ ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"`
+}
+
+// ImportModeType describes how to import an image manifest.
+type ImportModeType string
+
+const (
+ // ImportModeLegacy indicates that the legacy behaviour should be used.
+ // For manifest lists, the legacy behaviour will discard the manifest list and import a single
+ // sub-manifest. In this case, the platform is chosen in the following order of priority:
+ // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+ // This mode is the default.
+ ImportModeLegacy ImportModeType = "Legacy"
+ // ImportModePreserveOriginal indicates that the original manifest will be preserved.
+ // For manifest lists, the manifest list and all its sub-manifests will be imported.
+ ImportModePreserveOriginal ImportModeType = "PreserveOriginal"
+)
+
+// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when
+// image change triggers are fired.
+type TagReferencePolicyType string
+
+const (
+ // SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag
+ // is resolved into other resources (builds and deployment configurations).
+ SourceTagReferencePolicy TagReferencePolicyType = "Source"
+ // LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry,
+ // falling back to the remote location if the integrated registry has not been configured. The reference will
+ // use the internal DNS name or registry service IP.
+ LocalTagReferencePolicy TagReferencePolicyType = "Local"
+)
+
+// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when
+// image change triggers in deployment configs or builds are resolved. This allows the image stream
+// author to control how images are accessed.
+type TagReferencePolicy struct {
+ // type determines how the image pull spec should be transformed when the image stream tag is used in
+ // deployment config triggers or new builds. The default value is `Source`, indicating the original
+ // location of the image should be used (if imported). The user may also specify `Local`, indicating
+ // that the pull spec should point to the integrated container image registry and leverage the registry's
+ // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this
+ // image to be managed from the image stream's namespace, so others on the platform can access a remote
+ // image but have no access to the remote secret.
+ // It also allows the image layers to be mirrored into the local registry, so that images
+ // can still be pulled even if the upstream registry is unavailable.
+ Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"`
+}
+
+// ImageStreamStatus contains information about the state of this image stream.
+type ImageStreamStatus struct {
+ // dockerImageRepository represents the effective location this stream may be accessed at.
+ // May be empty until the server determines where the repository is located.
+ DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
+ // publicDockerImageRepository represents the public location from where the image can
+ // be pulled outside the cluster. This field may be empty if the administrator
+ // has not exposed the integrated registry externally.
+ PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
+ // tags are a historical record of images associated with each tag. The first entry in the
+ // TagEvent array is the currently tagged image.
+ // +patchMergeKey=tag
+ // +patchStrategy=merge
+ Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"`
+}
+
+// NamedTagEventList relates a tag to its image history.
+type NamedTagEventList struct {
+ // tag is the tag for which the history is recorded.
+ Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"`
+ // items is the historical record of images associated with this tag, most recent first.
+ Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"`
+ // conditions is an array of conditions that apply to the tag event list.
+ Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+}
+
+// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
+type TagEvent struct {
+ // created holds the time the TagEvent was created.
+ Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"`
+ // dockerImageReference is the string that can be used to pull this image.
+ DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"`
+ // image is the image.
+ Image string `json:"image" protobuf:"bytes,3,opt,name=image"`
+ // generation is the spec tag generation that resulted in this tag being updated.
+ Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"`
+}
+
+// TagEventConditionType is a type of tag event condition.
+type TagEventConditionType string
+
+// These are valid conditions of TagEvents.
+const (
+ // ImportSuccess with status False means the import of the specific tag failed.
+ ImportSuccess TagEventConditionType = "ImportSuccess"
+)
+
+// TagEventCondition contains condition information for a tag event.
+type TagEventCondition struct {
+ // type of tag event condition, currently only ImportSuccess.
+ Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"`
+ // status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // lastTransitionTime is the time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // reason is a brief machine readable explanation for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human readable description of the details about last transition, complementing reason. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` + // generation is the spec tag generation that this status corresponds to + Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"` +} + +// +genclient +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamMapping represents a mapping from a single image stream tag to a container +// image as well as the reference to the container image stream the image came from. This +// resource is used by privileged integrators to create an image resource and to associate +// it with an image stream in the status tags field. Creating an ImageStreamMapping will +// allow any user who can view the image stream to tag or pull that image, so only create +// mappings where the user has proven they have access to the image contents directly. +// The only operation supported for this resource is create and the metadata name and +// namespace should be set to the image stream containing the tag that should be updated. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamMapping struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // image is a container image. + Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` + // tag is a string value this image can be located with inside the stream. + Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"` +} + +// +genclient +// +genclient:onlyVerbs=get,list,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. +// Use this resource to interact with the tags and images in an image stream by tag, or +// to see the image details for a particular tag. The image associated with this resource +// is the most recently successfully tagged, imported, or pushed image (as described in the +// image stream status.tags.items list for this tag). If an import is in progress or has +// failed the previous image will be shown. Deleting an image stream tag clears both the +// status and spec fields of an image stream. If no image can be retrieved for a given tag, +// a not found error will be returned. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamTag struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"` + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"` + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"` + + // conditions is an array of conditions that apply to the image stream tag. + Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"` + + // image associated with the ImageStream and tag. + Image Image `json:"image" protobuf:"bytes,5,opt,name=image"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamTagList is a list of ImageStreamTag objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamTagList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of image stream tags + Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:onlyVerbs=get,list,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageTag struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. 
+ Spec *TagReference `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status is the status tag details associated with this image stream tag, and it
+ // may be null if no push or import has been performed.
+ Status *NamedTagEventList `json:"status" protobuf:"bytes,3,opt,name=status"`
+ // image is the details of the most recent image stream status tag, and it may be
+ // null if import has not completed or an administrator has deleted the image
+ // object. To verify this is the most recent image, you must verify the generation
+ // of the most recent status.items entry matches the spec tag (if a spec tag is
+ // set). This field will not be set when listing image tags.
+ Image *Image `json:"image" protobuf:"bytes,4,opt,name=image"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagList is a list of ImageTag objects. When listing image tags, the image
+// field is not populated. Tags are returned in alphabetical order by image stream
+// and then tag.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of image stream tags.
+ Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.
+// User interfaces and regular users can use this resource to access the metadata details of
+// a tagged image in the image stream history for viewing, since Image resources are not
+// directly accessible to end users. A not found error will be returned if no such image is
+// referenced by a tag within the ImageStream. Images are created when spec tags are set on
+// an image stream that represent an image in an external registry, when pushing to the
+// integrated registry, or when tagging an existing image from one image stream to another.
+// The name of an image stream image is in the form "<STREAM>@<DIGEST>", where the digest is
+// the content addressable identifier for the image (sha256:xxxxx...). You can use
+// ImageStreamImages as the from.kind of an image stream spec tag to reference an image
+// exactly. The only operation supported on the imagestreamimage endpoint is retrieving
+// the image.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamImage struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // image associated with the ImageStream and image name.
+ Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+}
+
+// DockerImageReference points to a container image.
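+// For illustration (a hypothetical pull spec), "registry.example.com/myproject/app:v1"
+// would decompose into:
+//
+//	DockerImageReference{
+//		Registry:  "registry.example.com",
+//		Namespace: "myproject",
+//		Name:      "app",
+//		Tag:       "v1",
+//	}
+//
+// while a by-digest reference such as "registry.example.com/myproject/app@sha256:..." would
+// set ID instead of Tag.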
+type DockerImageReference struct {
+ // Registry is the registry that contains the container image.
+ Registry string `protobuf:"bytes,1,opt,name=registry"`
+ // Namespace is the namespace that contains the container image.
+ Namespace string `protobuf:"bytes,2,opt,name=namespace"`
+ // Name is the name of the container image.
+ Name string `protobuf:"bytes,3,opt,name=name"`
+ // Tag is the tag of the container image being referenced.
+ Tag string `protobuf:"bytes,4,opt,name=tag"`
+ // ID is the identifier for the container image.
+ ID string `protobuf:"bytes,5,opt,name=iD"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamLayers describes information about the layers referenced by images in this
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamLayers struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // blobs is a map of blob name to metadata about the blob.
+ Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"`
+ // images is a map between an image name and the names of the blobs and config that
+ // comprise the image.
+ Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"`
+}
+
+// ImageBlobReferences describes the blob references within an image.
+type ImageBlobReferences struct {
+ // imageMissing is true if the image is referenced by the image stream but the image
+ // object has been deleted from the API by an administrator. When this field is set,
+ // layers and config fields may be empty and callers that depend on the image metadata
+ // should consider the image to be unavailable for download or viewing.
+ // +optional
+ ImageMissing bool `json:"imageMissing" protobuf:"varint,3,opt,name=imageMissing"`
+ // layers is the list of blobs that compose this image, from base layer to top layer.
+ // All layers referenced by this array will be defined in the blobs map. Some images
+ // may have zero layers.
+ // +optional
+ Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"`
+ // config, if set, is the blob that contains the image config. Some images do
+ // not have separate config blobs and this field will be set to nil if so.
+ // +optional
+ Config *string `json:"config" protobuf:"bytes,2,opt,name=config"`
+ // manifests is the list of other image names that this image points
+ // to. For a single architecture image, it is empty. For a multi-arch
+ // image, it consists of the digests of single architecture images;
+ // such images shouldn't have layers or config.
+ // +optional
+ Manifests []string `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
+}
+
+// ImageLayerData contains metadata about an image layer.
+type ImageLayerData struct {
+ // size of the layer in bytes as defined by the underlying store. This field is
+ // optional if the necessary information about size is not available.
+ LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"`
+ // mediaType of the referenced object.
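+ // For example (illustrative only), a gzipped OCI layer of known size might be
+ // recorded as:
+ //
+ //	size := int64(4096)
+ //	ImageLayerData{LayerSize: &size, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"}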
+ MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamImport struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is a description of the images that the user wishes to import + Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status is the result of importing the image + Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// ImageStreamImportSpec defines what images should be imported. +type ImageStreamImportSpec struct { + // import indicates whether to perform an import - if so, the specified tags are set on the spec + // and status of the image stream defined by the type meta. + Import bool `json:"import" protobuf:"varint,1,opt,name=import"` + // repository is an optional import of an entire container image repository. A maximum limit on the + // number of tags imported this way is imposed by the server. + Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` + // images are a list of individual images to import. + Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` +} + +// ImageStreamImportStatus contains information about the status of an image stream import. +type ImageStreamImportStatus struct { + // import is the image stream that was successfully updated or created when 'to' was set. + Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"` + // repository is set if spec.repository was set to the outcome of the import + Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` + // images is set with the result of importing spec.images + Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` +} + +// RepositoryImportSpec describes a request to import images from a container image repository. 
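+// A minimal illustrative sketch (the repository name is hypothetical): importing every
+// tag of an external repository, up to the server-imposed limit, on a schedule:
+//
+//	Repository: &RepositoryImportSpec{
+//		From:         corev1.ObjectReference{Kind: "DockerImage", Name: "quay.io/example/app"},
+//		ImportPolicy: TagImportPolicy{Scheduled: true},
+//	}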
+type RepositoryImportSpec struct {
+ // from is the source for the image repository to import; only kind DockerImage and a name of a container image repository are allowed.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // importPolicy is the policy controlling how the image is imported.
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"`
+ // referencePolicy defines how other components should consume the image.
+ ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"`
+ // includeManifest determines if the manifest for each image is returned in the response.
+ IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"`
+}
+
+// RepositoryImportStatus describes the result of an image repository import.
+type RepositoryImportStatus struct {
+ // status reflects whether any failure occurred during import.
+ Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"`
+ // images is a list of images successfully retrieved by the import of the repository.
+ Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"`
+ // additionalTags are tags that exist in the repository but were not imported because
+ // a maximum limit of automatic imports was applied.
+ AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"`
+}
+
+// ImageImportSpec describes a request to import a specific image.
+type ImageImportSpec struct {
+ // from is the source of an image to import; only kind DockerImage is allowed.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+ // to is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used.
+ To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"`
+
+ // importPolicy is the policy controlling how the image is imported.
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"`
+ // referencePolicy defines how other components should consume the image.
+ ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"`
+ // includeManifest determines if the manifest for each image is returned in the response.
+ IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"`
+}
+
+// ImageImportStatus describes the result of an image import.
+type ImageImportStatus struct {
+ // status is the status of the image import, including errors encountered while retrieving the image.
+ Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"`
+ // image is the metadata of that image, if the image was located.
+ Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+ // tag is the tag this image was located under, if any.
+ Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"`
+ // manifests holds sub-manifests metadata when importing a manifest list.
+ Manifests []Image `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretList is a list of Secret.
+// +openshift:compatibility-gen:level=1 +type SecretList corev1.SecretList diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..953f70263c1e0 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go @@ -0,0 +1,1045 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference. +func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata) + if in.DockerImageLayers != nil { + in, out := &in.DockerImageLayers, &out.DockerImageLayers + *out = make([]ImageLayer, len(*in)) + copy(*out, *in) + } + if in.Signatures != nil { + in, out := &in.Signatures, &out.Signatures + *out = make([]ImageSignature, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerImageSignatures != nil { + in, out := &in.DockerImageSignatures, &out.DockerImageSignatures + *out = make([][]byte, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]byte, len(*in)) + copy(*out, *in) + } + } + } + if in.DockerImageManifests != nil { + in, out := &in.DockerImageManifests, &out.DockerImageManifests + *out = make([]ImageManifest, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { + *out = *in + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(string) + **out = **in + } + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. 
+func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { + if in == nil { + return nil + } + out := new(ImageBlobReferences) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { + *out = *in + out.From = in.From + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.LocalObjectReference) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec. +func (in *ImageImportSpec) DeepCopy() *ImageImportSpec { + if in == nil { + return nil + } + out := new(ImageImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus. +func (in *ImageImportStatus) DeepCopy() *ImageImportStatus { + if in == nil { + return nil + } + out := new(ImageImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayer) DeepCopyInto(out *ImageLayer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer. +func (in *ImageLayer) DeepCopy() *ImageLayer { + if in == nil { + return nil + } + out := new(ImageLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { + *out = *in + if in.LayerSize != nil { + in, out := &in.LayerSize, &out.LayerSize + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. +func (in *ImageLayerData) DeepCopy() *ImageLayerData { + if in == nil { + return nil + } + out := new(ImageLayerData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
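+// (Illustrative note, not generated output: DeepCopyObject satisfies runtime.Object,
+// which lets generic machinery such as the scheme and informer caches copy values
+// without knowing the concrete type, e.g.
+//
+//	obj := list.DeepCopyObject().(*ImageList) // safe to mutate; the cached original is untouched
+//
+// The assertion back to *ImageList is the caller's responsibility.)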
+func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy. +func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy { + if in == nil { + return nil + } + out := new(ImageLookupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageManifest) DeepCopyInto(out *ImageManifest) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManifest. +func (in *ImageManifest) DeepCopy() *ImageManifest { + if in == nil { + return nil + } + out := new(ImageManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSignature) DeepCopyInto(out *ImageSignature) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SignatureCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SignedClaims != nil { + in, out := &in.SignedClaims, &out.SignedClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Created != nil { + in, out := &in.Created, &out.Created + *out = (*in).DeepCopy() + } + if in.IssuedBy != nil { + in, out := &in.IssuedBy, &out.IssuedBy + *out = new(SignatureIssuer) + **out = **in + } + if in.IssuedTo != nil { + in, out := &in.IssuedTo, &out.IssuedTo + *out = new(SignatureSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature. +func (in *ImageSignature) DeepCopy() *ImageSignature { + if in == nil { + return nil + } + out := new(ImageSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageSignature) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStream) DeepCopyInto(out *ImageStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream. +func (in *ImageStream) DeepCopy() *ImageStream { + if in == nil { + return nil + } + out := new(ImageStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage. +func (in *ImageStreamImage) DeepCopy() *ImageStreamImage { + if in == nil { + return nil + } + out := new(ImageStreamImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport. +func (in *ImageStreamImport) DeepCopy() *ImageStreamImport { + if in == nil { + return nil + } + out := new(ImageStreamImport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportSpec) + **out = **in + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec. +func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec { + if in == nil { + return nil + } + out := new(ImageStreamImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) { + *out = *in + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImageStream) + (*in).DeepCopyInto(*out) + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportStatus) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus. 
+func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { + if in == nil { + return nil + } + out := new(ImageStreamImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Blobs != nil { + in, out := &in.Blobs, &out.Blobs + *out = make(map[string]ImageLayerData, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]ImageBlobReferences, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. +func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { + if in == nil { + return nil + } + out := new(ImageStreamLayers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList. +func (in *ImageStreamList) DeepCopy() *ImageStreamList { + if in == nil { + return nil + } + out := new(ImageStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping. +func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping { + if in == nil { + return nil + } + out := new(ImageStreamMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) { + *out = *in + out.LookupPolicy = in.LookupPolicy + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec. +func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec { + if in == nil { + return nil + } + out := new(ImageStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]NamedTagEventList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus. +func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus { + if in == nil { + return nil + } + out := new(ImageStreamStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + out.LookupPolicy = in.LookupPolicy + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag. +func (in *ImageStreamTag) DeepCopy() *ImageStreamTag { + if in == nil { + return nil + } + out := new(ImageStreamTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStreamTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList. +func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList { + if in == nil { + return nil + } + out := new(ImageStreamTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageTag) DeepCopyInto(out *ImageTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(NamedTagEventList) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTag. +func (in *ImageTag) DeepCopy() *ImageTag { + if in == nil { + return nil + } + out := new(ImageTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagList) DeepCopyInto(out *ImageTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagList. +func (in *ImageTagList) DeepCopy() *ImageTagList { + if in == nil { + return nil + } + out := new(ImageTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TagEvent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList. +func (in *NamedTagEventList) DeepCopy() *NamedTagEventList { + if in == nil { + return nil + } + out := new(NamedTagEventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) { + *out = *in + out.From = in.From + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec. +func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec { + if in == nil { + return nil + } + out := new(RepositoryImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus. +func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus { + if in == nil { + return nil + } + out := new(RepositoryImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]corev1.Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition. +func (in *SignatureCondition) DeepCopy() *SignatureCondition { + if in == nil { + return nil + } + out := new(SignatureCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity. +func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity { + if in == nil { + return nil + } + out := new(SignatureGenericEntity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer. +func (in *SignatureIssuer) DeepCopy() *SignatureIssuer { + if in == nil { + return nil + } + out := new(SignatureIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject. +func (in *SignatureSubject) DeepCopy() *SignatureSubject { + if in == nil { + return nil + } + out := new(SignatureSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagEvent) DeepCopyInto(out *TagEvent) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent. +func (in *TagEvent) DeepCopy() *TagEvent { + if in == nil { + return nil + } + out := new(TagEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition. +func (in *TagEventCondition) DeepCopy() *TagEventCondition { + if in == nil { + return nil + } + out := new(TagEventCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy. +func (in *TagImportPolicy) DeepCopy() *TagImportPolicy { + if in == nil { + return nil + } + out := new(TagImportPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagReference) DeepCopyInto(out *TagReference) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Generation != nil { + in, out := &in.Generation, &out.Generation + *out = new(int64) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference. +func (in *TagReference) DeepCopy() *TagReference { + if in == nil { + return nil + } + out := new(TagReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy. 
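TagReference.DeepCopyInto above shows two more generated idioms: a pointer to a value with no internal references is duplicated with **out = **in, while a map must be re-allocated and copied key by key. A hedged, self-contained sketch of why both matter (Ref is an illustrative stand-in); TagReferencePolicy, which holds only scalars and therefore needs just *out = *in, continues below:

package main

import "fmt"

// Ref stands in for TagReference: one pointer field, one map field.
type Ref struct {
	Generation  *int64
	Annotations map[string]string
}

func (in *Ref) DeepCopyInto(out *Ref) {
	*out = *in // copies the pointer and the map header, still aliased
	if in.Generation != nil {
		out.Generation = new(int64)
		*out.Generation = *in.Generation // the generated form writes **out = **in
	}
	if in.Annotations != nil {
		out.Annotations = make(map[string]string, len(in.Annotations))
		for k, v := range in.Annotations {
			out.Annotations[k] = v
		}
	}
}

func main() {
	g := int64(1)
	a := Ref{Generation: &g, Annotations: map[string]string{"team": "infra"}}
	var b Ref
	a.DeepCopyInto(&b)
	*b.Generation = 2
	b.Annotations["team"] = "apps"
	fmt.Println(*a.Generation, a.Annotations["team"]) // prints "1 infra": no aliasing
}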
+func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy { + if in == nil { + return nil + } + out := new(TagReferencePolicy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..e0720bec772bc --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,444 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerImageReference = map[string]string{ + "": "DockerImageReference points to a container image.", + "Registry": "Registry is the registry that contains the container image", + "Namespace": "Namespace is the namespace that contains the container image", + "Name": "Name is the name of the container image", + "Tag": "Tag is which tag of the container image is being referenced", + "ID": "ID is the identifier for the container image", +} + +func (DockerImageReference) SwaggerDoc() map[string]string { + return map_DockerImageReference +} + +var map_Image = map[string]string{ + "": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image.", + "dockerImageMetadata": "dockerImageMetadata contains metadata about this image", + "dockerImageMetadataVersion": "dockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "dockerImageManifest": "dockerImageManifest is the raw JSON of the manifest", + "dockerImageLayers": "dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", + "signatures": "signatures holds all signatures of the image.", + "dockerImageSignatures": "dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", + "dockerImageManifestMediaType": "dockerImageManifestMediaType specifies the mediaType of manifest. 
This is a part of manifest schema v2.", + "dockerImageConfig": "dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.", + "dockerImageManifests": "dockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageBlobReferences = map[string]string{ + "": "ImageBlobReferences describes the blob references within an image.", + "imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.", + "layers": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + "config": "config, if set, is the blob that contains the image config. Some images do not have separate config blobs and this field will be set to nil if so.", + "manifests": "manifests is the list of other image names that this image points to. For a single architecture image, it is empty. For a multi-arch image, it consists of the digests of single architecture images; such images should have neither layers nor config.", +} + +func (ImageBlobReferences) SwaggerDoc() map[string]string { + return map_ImageBlobReferences +} + +var map_ImageImportSpec = map[string]string{ + "": "ImageImportSpec describes a request to import a specific image.", + "from": "from is the source of an image to import; only kind DockerImage is allowed", + "to": "to is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used", + "importPolicy": "importPolicy is the policy controlling how the image is imported", + "referencePolicy": "referencePolicy defines how other components should consume the image", + "includeManifest": "includeManifest determines if the manifest for each image is returned in the response", +} + +func (ImageImportSpec) SwaggerDoc() map[string]string { + return map_ImageImportSpec +} + +var map_ImageImportStatus = map[string]string{ + "": "ImageImportStatus describes the result of an image import.", + "status": "status is the status of the image import, including errors encountered while retrieving the image", + "image": "image is the metadata of that image, if the image was located", + "tag": "tag is the tag this image was located under, if any", + "manifests": "manifests holds sub-manifests metadata when importing a manifest list", +} + +func (ImageImportStatus) SwaggerDoc() map[string]string { + return map_ImageImportStatus +} + +var map_ImageLayer = map[string]string{ + "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. 
Some may have none.", + "name": "name of the layer as defined by the underlying store.", + "size": "size of the layer in bytes as defined by the underlying store.", + "mediaType": "mediaType of the referenced object.", +} + +func (ImageLayer) SwaggerDoc() map[string]string { + return map_ImageLayer +} + +var map_ImageLayerData = map[string]string{ + "": "ImageLayerData contains metadata about an image layer.", + "size": "size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", + "mediaType": "mediaType of the referenced object.", +} + +func (ImageLayerData) SwaggerDoc() map[string]string { + return map_ImageLayerData +} + +var map_ImageList = map[string]string{ + "": "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of images", +} + +func (ImageList) SwaggerDoc() map[string]string { + return map_ImageList +} + +var map_ImageLookupPolicy = map[string]string{ + "": "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.", + "local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.", +} + +func (ImageLookupPolicy) SwaggerDoc() map[string]string { + return map_ImageLookupPolicy +} + +var map_ImageManifest = map[string]string{ + "": "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.", + "digest": "digest is the unique identifier for the manifest. It refers to an Image object.", + "mediaType": "mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.", + "manifestSize": "manifestSize represents the size of the raw object contents, in bytes.", + "architecture": "architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.", + "os": "os specifies the operating system, for example `linux`.", + "variant": "variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.", +} + +func (ImageManifest) SwaggerDoc() map[string]string { + return map_ImageManifest +} + +var map_ImageSignature = map[string]string{ + "": "ImageSignature holds a signature of an image. It allows verifying image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. Mandatory fields should be parsed by clients doing image verification. The others are parsed from signature's content by the server. 
They serve just an informative purpose.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "type": "Required: Describes a type of stored blob.", + "content": "Required: An opaque binary string which is an image's signature.", + "conditions": "conditions represent the latest available observations of a signature's current state.", + "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").", + "signedClaims": "Contains claims from the signature.", + "created": "If specified, it is the time of signature's creation.", + "issuedBy": "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).", + "issuedTo": "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).", +} + +func (ImageSignature) SwaggerDoc() map[string]string { + return map_ImageSignature +} + +var map_ImageStream = map[string]string{ + "": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the desired state of this stream", + "status": "status describes the current state of this stream", +} + +func (ImageStream) SwaggerDoc() map[string]string { + return map_ImageStream +} + +var map_ImageStreamImage = map[string]string{ + "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. 
Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"<stream>@<digest>\", where the digest is the content addressable identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operation supported on the imagestreamimage endpoint is retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "image": "image associated with the ImageStream and image name.", +} + +func (ImageStreamImage) SwaggerDoc() map[string]string { + return map_ImageStreamImage +} + +var map_ImageStreamImport = map[string]string{ + "": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is a description of the images that the user wishes to import", + "status": "status is the result of importing the image", +} + +func (ImageStreamImport) SwaggerDoc() map[string]string { + return map_ImageStreamImport +} + +var map_ImageStreamImportSpec = map[string]string{ + "": "ImageStreamImportSpec defines what images should be imported.", + "import": "import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", + "repository": "repository is an optional import of an entire container image repository. 
A maximum limit on the number of tags imported this way is imposed by the server.", + "images": "images are a list of individual images to import.", +} + +func (ImageStreamImportSpec) SwaggerDoc() map[string]string { + return map_ImageStreamImportSpec +} + +var map_ImageStreamImportStatus = map[string]string{ + "": "ImageStreamImportStatus contains information about the status of an image stream import.", + "import": "import is the image stream that was successfully updated or created when 'to' was set.", + "repository": "repository is set if spec.repository was set to the outcome of the import", + "images": "images is set with the result of importing spec.images", +} + +func (ImageStreamImportStatus) SwaggerDoc() map[string]string { + return map_ImageStreamImportStatus +} + +var map_ImageStreamLayers = map[string]string{ + "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "blobs": "blobs is a map of blob name to metadata about the blob.", + "images": "images is a map between an image name and the names of the blobs and config that comprise the image.", +} + +func (ImageStreamLayers) SwaggerDoc() map[string]string { + return map_ImageStreamLayers +} + +var map_ImageStreamList = map[string]string{ + "": "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of imageStreams", +} + +func (ImageStreamList) SwaggerDoc() map[string]string { + return map_ImageStreamList +} + +var map_ImageStreamMapping = map[string]string{ + "": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "image": "image is a container image.", + "tag": "tag is a string value this image can be located with inside the stream.", +} + +func (ImageStreamMapping) SwaggerDoc() map[string]string { + return map_ImageStreamMapping +} + +var map_ImageStreamSpec = map[string]string{ + "": "ImageStreamSpec represents options for ImageStreams.", + "lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.", + "dockerImageRepository": "dockerImageRepository is optional, if specified this stream is backed by a container repository on this server Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.", + "tags": "tags map arbitrary string values to specific image locators", +} + +func (ImageStreamSpec) SwaggerDoc() map[string]string { + return map_ImageStreamSpec +} + +var map_ImageStreamStatus = map[string]string{ + "": "ImageStreamStatus contains information about the state of this image stream.", + "dockerImageRepository": "dockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", + "publicDockerImageRepository": "publicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", + "tags": "tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", +} + +func (ImageStreamStatus) SwaggerDoc() map[string]string { + return map_ImageStreamStatus +} + +var map_ImageStreamTag = map[string]string{ + "": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. Use this resource to interact with the tags and images in an image stream by tag, or to see the image details for a particular tag. The image associated with this resource is the most recently successfully tagged, imported, or pushed image (as described in the image stream status.tags.items list for this tag). If an import is in progress or has failed the previous image will be shown. Deleting an image stream tag clears both the status and spec fields of an image stream. If no image can be retrieved for a given tag, a not found error will be returned.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.", + "lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.", + "conditions": "conditions is an array of conditions that apply to the image stream tag.", + "image": "image associated with the ImageStream and tag.", +} + +func (ImageStreamTag) SwaggerDoc() map[string]string { + return map_ImageStreamTag +} + +var map_ImageStreamTagList = map[string]string{ + "": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of image stream tags", +} + +func (ImageStreamTagList) SwaggerDoc() map[string]string { + return map_ImageStreamTagList +} + +var map_ImageTag = map[string]string{ + "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.", + "image": "image is the details of the most recent image stream status tag, and it may be null if import has not completed or an administrator has deleted the image object. To verify this is the most recent image, you must verify the generation of the most recent status.items entry matches the spec tag (if a spec tag is set). This field will not be set when listing image tags.", +} + +func (ImageTag) SwaggerDoc() map[string]string { + return map_ImageTag +} + +var map_ImageTagList = map[string]string{ + "": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of image stream tags", +} + +func (ImageTagList) SwaggerDoc() map[string]string { + return map_ImageTagList +} + +var map_NamedTagEventList = map[string]string{ + "": "NamedTagEventList relates a tag to its image history.", + "tag": "tag is the tag for which the history is recorded", + "items": "items is the historical record of images associated with this tag. The first entry is the currently tagged image.", + "conditions": "conditions is an array of conditions that apply to the tag event list.", +} + +func (NamedTagEventList) SwaggerDoc() map[string]string { + return map_NamedTagEventList +} + +var map_RepositoryImportSpec = map[string]string{ + "": "RepositoryImportSpec describes a request to import images from a container image repository.", + "from": "from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", + "importPolicy": "importPolicy is the policy controlling how the image is imported", + "referencePolicy": "referencePolicy defines how other components should consume the image", + "includeManifest": "includeManifest determines if the manifest for each image is returned in the response", +} + +func (RepositoryImportSpec) SwaggerDoc() map[string]string { + return map_RepositoryImportSpec +} + +var map_RepositoryImportStatus = map[string]string{ + "": "RepositoryImportStatus describes the result of an image repository import", + "status": "status reflects whether any failure occurred during import", + "images": "images is a list of images successfully retrieved by the import of the repository.", + "additionalTags": "additionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", +} + +func (RepositoryImportStatus) SwaggerDoc() map[string]string { + return map_RepositoryImportStatus +} + +var map_SignatureCondition = map[string]string{ + "": "SignatureCondition describes an image signature condition of particular kind at particular probe time.", + "type": "type of signature condition, Complete or Failed.", + "status": "status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (SignatureCondition) SwaggerDoc() map[string]string { + return map_SignatureCondition +} + +var map_SignatureGenericEntity = map[string]string{ + "": "SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject of signing certificate or key.", + "organization": "organization name.", + "commonName": "Common name (e.g. openshift-signing-service).", +} + +func (SignatureGenericEntity) SwaggerDoc() map[string]string { + return map_SignatureGenericEntity +} + +var map_SignatureIssuer = map[string]string{ + "": "SignatureIssuer holds information about an issuer of signing certificate or key.", +} + +func (SignatureIssuer) SwaggerDoc() map[string]string { + return map_SignatureIssuer +} + +var map_SignatureSubject = map[string]string{ + "": "SignatureSubject holds information about a person or entity who created the signature.", + "publicKeyID": "If present, it is a human readable key id of public key belonging to the subject used to verify image signature. 
It should contain at least 64 lowest bits of public key's fingerprint (e.g. 0x685ebe62bf278440).", +} + +func (SignatureSubject) SwaggerDoc() map[string]string { + return map_SignatureSubject +} + +var map_TagEvent = map[string]string{ + "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", + "created": "created holds the time the TagEvent was created", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image", + "image": "image is the image", + "generation": "generation is the spec tag generation that resulted in this tag being updated", +} + +func (TagEvent) SwaggerDoc() map[string]string { + return map_TagEvent +} + +var map_TagEventCondition = map[string]string{ + "": "TagEventCondition contains condition information for a tag event.", + "type": "type of tag event condition, currently only ImportSuccess", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "reason is a brief machine readable explanation for the condition's last transition.", + "message": "message is a human readable description of the details about last transition, complementing reason.", + "generation": "generation is the spec tag generation that this status corresponds to", +} + +func (TagEventCondition) SwaggerDoc() map[string]string { + return map_TagEventCondition +} + +var map_TagImportPolicy = map[string]string{ + "": "TagImportPolicy controls how images related to this tag will be imported.", + "insecure": "insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "scheduled": "scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", + "importMode": "importMode describes how to import an image manifest.", +} + +func (TagImportPolicy) SwaggerDoc() map[string]string { + return map_TagImportPolicy +} + +var map_TagReference = map[string]string{ + "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", + "name": "name of the tag", + "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", + "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.", + "reference": "reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "generation": "generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. 
Legacy clients will send this value as nil which will be merged with the current tag generation.", + "importPolicy": "importPolicy is information that controls how images may be imported by the server.", + "referencePolicy": "referencePolicy defines how other components should consume the image.", +} + +func (TagReference) SwaggerDoc() map[string]string { + return map_TagReference +} + +var map_TagReferencePolicy = map[string]string{ + "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.", + "type": "type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", +} + +func (TagReferencePolicy) SwaggerDoc() map[string]string { + return map_TagReferencePolicy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go new file mode 100644 index 0000000000000..d8872a61329bf --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=kubecontrolplane.config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go new file mode 100644 index 0000000000000..f8abc8ad8ce8f --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go @@ -0,0 +1,38 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "kubecontrolplane.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &KubeAPIServerConfig{}, + &KubeControllerManagerConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go new file mode 100644 index 0000000000000..cd1ba7ec56d29 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go @@ -0,0 +1,236 @@ +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type KubeAPIServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // authConfig configures authentication options in addition to the standard + // oauth token and client certificate authenticators + AuthConfig MasterAuthConfig `json:"authConfig"` + + // aggregatorConfig has options for configuring the aggregator component of the API server. + AggregatorConfig AggregatorConfig `json:"aggregatorConfig"` + + // kubeletClientInfo contains information about how to connect to kubelets + KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"` + + // servicesSubnet is the subnet to use for assigning service IPs + ServicesSubnet string `json:"servicesSubnet"` + // servicesNodePortRange is the range to use for assigning service public ports on a host. + ServicesNodePortRange string `json:"servicesNodePortRange"` + + // DEPRECATED: consolePublicURL has been deprecated and setting it has no effect. + ConsolePublicURL string `json:"consolePublicURL"` + + // userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + // TODO I think we should just drop this feature. + UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` + + // imagePolicyConfig feeds the image policy admission plugin + // TODO make it an admission plugin config + ImagePolicyConfig KubeAPIServerImagePolicyConfig `json:"imagePolicyConfig"` + + // projectConfig feeds an admission plugin + // TODO make it an admission plugin config + ProjectConfig KubeAPIServerProjectConfig `json:"projectConfig"` + + // serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. + // (If any file contains a private key, the public portion of the key is used) + // The list of public keys is used to verify presented service account tokens. + // Each key is tried in order until the list is exhausted or verification succeeds. + // If no keys are specified, no service account authentication will be available. + ServiceAccountPublicKeyFiles []string `json:"serviceAccountPublicKeyFiles"` + + // oauthConfig, if present start the /oauth endpoint in this process + OAuthConfig *osinv1.OAuthConfig `json:"oauthConfig"` + + // TODO this needs to be removed. + APIServerArguments map[string]Arguments `json:"apiServerArguments"` + + // minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. 
+ // Specifically, the apiserver will deny most authorization requests of kubelets that are older + // than the specified version, only allowing the kubelet to get and update its node object, and perform + // subjectaccessreviews. + // This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, + // and will eventually be marked as not ready. + // Its max length is 8, so maximum version allowed is either "9.999.99" or "99.99.99". + // Since the kubelet reports the version of the kubernetes release, not OpenShift, this field references + // the underlying kubernetes version this version of OpenShift is based off of. + // In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then + // they should set the minimumKubeletVersion to 1.30.0. + // When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. + // Thus, a kubelet with version "1.0.0-ec.0" will be compatible with minimumKubeletVersion "1.0.0" or earlier. + // +kubebuilder:validation:XValidation:rule="self == \"\" || self.matches('^[0-9]*.[0-9]*.[0-9]*$')",message="minimumKubeletVersion must be in a semver compatible format of x.y.z, or empty" + // +kubebuilder:validation:MaxLength:=8 + // +openshift:enable:FeatureGate=MinimumKubeletVersion + // +optional + MinimumKubeletVersion string `json:"minimumKubeletVersion"` +} + +// Arguments masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Arguments []string + +func (t Arguments) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +type KubeAPIServerImagePolicyConfig struct { + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + InternalRegistryHostname string `json:"internalRegistryHostname"` + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + ExternalRegistryHostnames []string `json:"externalRegistryHostnames"` +} + +type KubeAPIServerProjectConfig struct { + // defaultNodeSelector holds default project node label selector + DefaultNodeSelector string `json:"defaultNodeSelector"` +} + +// KubeletConnectionInfo holds information necessary for connecting to a kubelet +type KubeletConnectionInfo struct { + // port is the port to connect to kubelets on + Port uint32 `json:"port"` + // ca is the CA for verifying TLS connections to kubelets + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to kubelets + // this is anonymous so that we can inline it for serialization + configv1.CertInfo `json:",inline"` +} + +// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! 
+type UserAgentMatchingConfig struct { + // requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed + RequiredClients []UserAgentMatchRule `json:"requiredClients"` + + // deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes + DeniedClients []UserAgentDenyRule `json:"deniedClients"` + + // defaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given. + DefaultRejectionMessage string `json:"defaultRejectionMessage"` +} + +// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb +type UserAgentMatchRule struct { + // regex is a regex that is checked against the User-Agent. + // Known variants of oc clients + // 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f + // 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f + // 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f + // 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f + Regex string `json:"regex"` + + // httpVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". + HTTPVerbs []string `json:"httpVerbs"` +} + +// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client +type UserAgentDenyRule struct { + UserAgentMatchRule `json:",inline"` + + // rejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used. + RejectionMessage string `json:"rejectionMessage"` +} + +// MasterAuthConfig configures authentication options in addition to the standard +// oauth token and client certificate authenticators +type MasterAuthConfig struct { + // requestHeader holds options for setting up a front proxy against the API. It is optional. + RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"` + // webhookTokenAuthenticators, if present, configures remote token reviewers + WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"` + // oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization + // Server Metadata for an external OAuth server. + // See IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This option is mutually exclusive with OAuthConfig + OAuthMetadataFile string `json:"oauthMetadataFile"` +} + +// WebhookTokenAuthenticators holds the necessary configuration options for +// external token authenticators +type WebhookTokenAuthenticator struct { + // configFile is a path to a Kubeconfig file with the webhook configuration + ConfigFile string `json:"configFile"` + // cacheTTL indicates how long an authentication result should be cached. + // It takes a valid time duration string (e.g. "5m"). + // If empty, you get a default timeout of 2 minutes. + // If zero (e.g. 
"0m"), caching is disabled + CacheTTL string `json:"cacheTTL"` +} + +// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire +// API instead of against the /oauth endpoint. +type RequestHeaderAuthenticationOptions struct { + // clientCA is a file with the trusted signer certs. It is required. + ClientCA string `json:"clientCA"` + // clientCommonNames is a required list of common names to require a match from. + ClientCommonNames []string `json:"clientCommonNames"` + + // usernameHeaders is the list of headers to check for user information. First hit wins. + UsernameHeaders []string `json:"usernameHeaders"` + // groupHeaders is the set of headers to check for group information. All are unioned. + GroupHeaders []string `json:"groupHeaders"` + // extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. + ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` +} + +// AggregatorConfig holds information required to make the aggregator function. +type AggregatorConfig struct { + // proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers + ProxyClientInfo configv1.CertInfo `json:"proxyClientInfo"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type KubeControllerManagerConfig struct { + metav1.TypeMeta `json:",inline"` + + // serviceServingCert provides support for the old alpha service serving cert signer CA bundle + ServiceServingCert ServiceServingCert `json:"serviceServingCert"` + + // projectConfig is an optimization for the daemonset controller + ProjectConfig KubeControllerManagerProjectConfig `json:"projectConfig"` + + // extendedArguments is used to configure the kube-controller-manager + ExtendedArguments map[string]Arguments `json:"extendedArguments"` +} + +type KubeControllerManagerProjectConfig struct { + // defaultNodeSelector holds default project node label selector + DefaultNodeSelector string `json:"defaultNodeSelector"` +} + +// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for +// pods fulfilling a service to serve with. +type ServiceServingCert struct { + // certFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e4378aa527257 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go @@ -0,0 +1,379 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + osinv1 "github.com/openshift/api/osin/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) { + *out = *in + out.ProxyClientInfo = in.ProxyClientInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig. 
+func (in *AggregatorConfig) DeepCopy() *AggregatorConfig { + if in == nil { + return nil + } + out := new(AggregatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Arguments) DeepCopyInto(out *Arguments) { + { + in := &in + *out = make(Arguments, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arguments. +func (in Arguments) DeepCopy() Arguments { + if in == nil { + return nil + } + out := new(Arguments) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.AuthConfig.DeepCopyInto(&out.AuthConfig) + out.AggregatorConfig = in.AggregatorConfig + out.KubeletClientInfo = in.KubeletClientInfo + in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig) + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + out.ProjectConfig = in.ProjectConfig + if in.ServiceAccountPublicKeyFiles != nil { + in, out := &in.ServiceAccountPublicKeyFiles, &out.ServiceAccountPublicKeyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OAuthConfig != nil { + in, out := &in.OAuthConfig, &out.OAuthConfig + *out = new(osinv1.OAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(map[string]Arguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Arguments, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerImagePolicyConfig) DeepCopyInto(out *KubeAPIServerImagePolicyConfig) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerImagePolicyConfig. +func (in *KubeAPIServerImagePolicyConfig) DeepCopy() *KubeAPIServerImagePolicyConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
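Returning to the minimumKubeletVersion field in types.go above: its comment says the kubelet's reported version is stripped of anything outside major.minor.patch before comparison, so "1.0.0-ec.0" compares equal to "1.0.0". A self-contained sketch of that normalization under those stated rules (illustrative only; the apiserver's real check is not part of this diff):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMajorMinorPatch keeps only the leading x.y.z of a version string;
// missing components default to zero.
func parseMajorMinorPatch(v string) ([3]int, error) {
	v = strings.TrimPrefix(v, "v")
	// Drop pre-release/build suffixes such as "-ec.0" or "+abc".
	if i := strings.IndexAny(v, "-+"); i >= 0 {
		v = v[:i]
	}
	var out [3]int
	for i, p := range strings.SplitN(v, ".", 3) {
		n, err := strconv.Atoi(p)
		if err != nil {
			return out, fmt.Errorf("invalid version %q: %v", v, err)
		}
		out[i] = n
	}
	return out, nil
}

// atLeast reports whether the kubelet's version is >= the configured minimum.
func atLeast(kubelet, minimum string) (bool, error) {
	k, err := parseMajorMinorPatch(kubelet)
	if err != nil {
		return false, err
	}
	m, err := parseMajorMinorPatch(minimum)
	if err != nil {
		return false, err
	}
	for i := 0; i < 3; i++ {
		if k[i] != m[i] {
			return k[i] > m[i], nil
		}
	}
	return true, nil
}

func main() {
	ok, _ := atLeast("1.30.0-ec.0", "1.30.0")
	fmt.Println(ok) // true: the suffix is stripped before comparing
}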
+func (in *KubeAPIServerProjectConfig) DeepCopyInto(out *KubeAPIServerProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerProjectConfig. +func (in *KubeAPIServerProjectConfig) DeepCopy() *KubeAPIServerProjectConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ServiceServingCert = in.ServiceServingCert + out.ProjectConfig = in.ProjectConfig + if in.ExtendedArguments != nil { + in, out := &in.ExtendedArguments, &out.ExtendedArguments + *out = make(map[string]Arguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Arguments, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig. +func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerProjectConfig) DeepCopyInto(out *KubeControllerManagerProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerProjectConfig. +func (in *KubeControllerManagerProjectConfig) DeepCopy() *KubeControllerManagerProjectConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo. +func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo { + if in == nil { + return nil + } + out := new(KubeletConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderAuthenticationOptions) + (*in).DeepCopyInto(*out) + } + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]WebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig. 
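+//
+// DeepCopyObject (above) is what lets these config types satisfy
+// k8s.io/apimachinery's runtime.Object, so generic machinery can clone them
+// without knowing the concrete type; a minimal sketch:
+//
+//	var obj runtime.Object = &KubeControllerManagerConfig{}
+//	clone := obj.DeepCopyObject() // dynamic type is *KubeControllerManagerConfig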
+func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig { + if in == nil { + return nil + } + out := new(MasterAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) { + *out = *in + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions. +func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions { + if in == nil { + return nil + } + out := new(RequestHeaderAuthenticationOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) { + *out = *in + in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule. +func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule { + if in == nil { + return nil + } + out := new(UserAgentDenyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) { + *out = *in + if in.HTTPVerbs != nil { + in, out := &in.HTTPVerbs, &out.HTTPVerbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule. +func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule { + if in == nil { + return nil + } + out := new(UserAgentMatchRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
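+//
+// RequiredClients and DeniedClients are slices of structs that themselves hold
+// slices (e.g. HTTPVerbs), so each element is cloned with DeepCopyInto rather
+// than with the built-in copy, which would only assign struct values and leave
+// the inner slices aliased:
+//
+//	copy(out, in)                                       // shallow: HTTPVerbs shared
+//	for i := range in { in[i].DeepCopyInto(&out[i]) }   // deep: the form used below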
+func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) { + *out = *in + if in.RequiredClients != nil { + in, out := &in.RequiredClients, &out.RequiredClients + *out = make([]UserAgentMatchRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeniedClients != nil { + in, out := &in.DeniedClients, &out.DeniedClients + *out = make([]UserAgentDenyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig. +func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig { + if in == nil { + return nil + } + out := new(UserAgentMatchingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. +func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..7b5bef143cfa0 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,162 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AggregatorConfig = map[string]string{ + "": "AggregatorConfig holds information required to make the aggregator function.", + "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", +} + +func (AggregatorConfig) SwaggerDoc() map[string]string { + return map_AggregatorConfig +} + +var map_KubeAPIServerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "authConfig": "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "aggregatorConfig": "aggregatorConfig has options for configuring the aggregator component of the API server.", + "kubeletClientInfo": "kubeletClientInfo contains information about how to connect to kubelets", + "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs", + "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.", + "consolePublicURL": "DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.", + "userAgentMatchingConfig": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin", + "projectConfig": "projectConfig feeds an admission plugin", + "serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used.) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", + "oauthConfig": "oauthConfig, if present, starts the /oauth endpoint in this process", + "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so the maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the Kubernetes release, not OpenShift, this field references the underlying Kubernetes version that this version of OpenShift is based on. In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of the major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", +} + +func (KubeAPIServerConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerConfig +} + +var map_KubeAPIServerImagePolicyConfig = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in the 'publicDockerImageRepository' field in ImageStreams. 
The value must be in \"hostname[:port]\" format.", +} + +func (KubeAPIServerImagePolicyConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerImagePolicyConfig +} + +var map_KubeAPIServerProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", +} + +func (KubeAPIServerProjectConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerProjectConfig +} + +var map_KubeControllerManagerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "serviceServingCert": "serviceServingCert provides support for the old alpha service serving cert signer CA bundle", + "projectConfig": "projectConfig is an optimization for the daemonset controller", + "extendedArguments": "extendedArguments is used to configure the kube-controller-manager", +} + +func (KubeControllerManagerConfig) SwaggerDoc() map[string]string { + return map_KubeControllerManagerConfig +} + +var map_KubeControllerManagerProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", +} + +func (KubeControllerManagerProjectConfig) SwaggerDoc() map[string]string { + return map_KubeControllerManagerProjectConfig +} + +var map_KubeletConnectionInfo = map[string]string{ + "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", + "port": "port is the port to connect to kubelets on", + "ca": "ca is the CA for verifying TLS connections to kubelets", +} + +func (KubeletConnectionInfo) SwaggerDoc() map[string]string { + return map_KubeletConnectionInfo +} + +var map_MasterAuthConfig = map[string]string{ + "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "requestHeader": "requestHeader holds options for setting up a front proxy against the API. It is optional.", + "webhookTokenAuthenticators": "webhookTokenAuthenticators, if present, configures remote token reviewers", + "oauthMetadataFile": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2. This option is mutually exclusive with OAuthConfig.", +} + +func (MasterAuthConfig) SwaggerDoc() map[string]string { + return map_MasterAuthConfig +} + +var map_RequestHeaderAuthenticationOptions = map[string]string{ + "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", + "clientCA": "clientCA is a file with the trusted signer certs. It is required.", + "clientCommonNames": "clientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the list of headers to check for user information. First hit wins.", + "groupHeaders": "groupHeaders is the set of headers to check for group information. All are unioned.", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. 
X-Remote-Extra- is suggested.", +} + +func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { + return map_RequestHeaderAuthenticationOptions +} + +var map_ServiceServingCert = map[string]string{ + "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "certFile": "certFile is a file containing a PEM-encoded certificate", +} + +func (ServiceServingCert) SwaggerDoc() map[string]string { + return map_ServiceServingCert +} + +var map_UserAgentDenyRule = map[string]string{ + "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", + "rejectionMessage": "rejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.", +} + +func (UserAgentDenyRule) SwaggerDoc() map[string]string { + return map_UserAgentDenyRule +} + +var map_UserAgentMatchRule = map[string]string{ + "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb", + "regex": "regex is a regex that is checked against the User-Agent. Known variants of oc clients: 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f", + "httpVerbs": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", +} + +func (UserAgentMatchRule) SwaggerDoc() map[string]string { + return map_UserAgentMatchRule +} + +var map_UserAgentMatchingConfig = map[string]string{ + "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "requiredClients": "requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed", + "deniedClients": "deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes", + "defaultRejectionMessage": "defaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.", +} + +func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { + return map_UserAgentMatchingConfig +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "WebhookTokenAuthenticators holds the necessary configuration options for external token authenticators", + "configFile": "configFile is a path to a Kubeconfig file with the webhook configuration", + "cacheTTL": "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/network/v1/Makefile b/vendor/github.com/openshift/api/network/v1/Makefile new file mode 100644 index 0000000000000..027afff7ca4cc --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/network/v1/constants.go b/vendor/github.com/openshift/api/network/v1/constants.go new file mode 100644 index 0000000000000..54c06f331909e --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/constants.go @@ -0,0 +1,17 @@ +package v1 + +const ( + // Pod annotations + AssignMacvlanAnnotation = "pod.network.openshift.io/assign-macvlan" + + // HostSubnet annotations. (Note: should be "hostsubnet.network.openshift.io/", but the incorrect name is now part of the API.) + AssignHostSubnetAnnotation = "pod.network.openshift.io/assign-subnet" + FixedVNIDHostAnnotation = "pod.network.openshift.io/fixed-vnid-host" + NodeUIDAnnotation = "pod.network.openshift.io/node-uid" + + // NetNamespace annotations + MulticastEnabledAnnotation = "netnamespace.network.openshift.io/multicast-enabled" + + // ChangePodNetworkAnnotation is an annotation on NetNamespace to request change of pod network + ChangePodNetworkAnnotation string = "pod.network.openshift.io/multitenant.change-network" +) diff --git a/vendor/github.com/openshift/api/network/v1/doc.go b/vendor/github.com/openshift/api/network/v1/doc.go new file mode 100644 index 0000000000000..2816420d9688c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/network/apis/network +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=network.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/network/v1/generated.pb.go b/vendor/github.com/openshift/api/network/v1/generated.pb.go new file mode 100644 index 0000000000000..9534e3715561c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/generated.pb.go @@ -0,0 +1,3186 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/network/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterNetwork) Reset() { *m = ClusterNetwork{} } +func (*ClusterNetwork) ProtoMessage() {} +func (*ClusterNetwork) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{0} +} +func (m *ClusterNetwork) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetwork) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetwork.Merge(m, src) +} +func (m *ClusterNetwork) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetwork) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetwork.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetwork proto.InternalMessageInfo + +func (m *ClusterNetworkEntry) Reset() { *m = ClusterNetworkEntry{} } +func (*ClusterNetworkEntry) ProtoMessage() {} +func (*ClusterNetworkEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{1} +} +func (m *ClusterNetworkEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetworkEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetworkEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetworkEntry.Merge(m, src) +} +func (m *ClusterNetworkEntry) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetworkEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetworkEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetworkEntry proto.InternalMessageInfo + +func (m *ClusterNetworkList) Reset() { *m = ClusterNetworkList{} } +func (*ClusterNetworkList) ProtoMessage() {} +func (*ClusterNetworkList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{2} +} +func (m *ClusterNetworkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetworkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetworkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetworkList.Merge(m, src) +} +func (m *ClusterNetworkList) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetworkList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetworkList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetworkList proto.InternalMessageInfo + +func (m *EgressNetworkPolicy) Reset() { *m = EgressNetworkPolicy{} } +func (*EgressNetworkPolicy) ProtoMessage() {} +func (*EgressNetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{3} +} +func (m *EgressNetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicy.Merge(m, src) +} +func (m *EgressNetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicy.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EgressNetworkPolicy proto.InternalMessageInfo + +func (m *EgressNetworkPolicyList) Reset() { *m = EgressNetworkPolicyList{} } +func (*EgressNetworkPolicyList) ProtoMessage() {} +func (*EgressNetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{4} +} +func (m *EgressNetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyList.Merge(m, src) +} +func (m *EgressNetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyList proto.InternalMessageInfo + +func (m *EgressNetworkPolicyPeer) Reset() { *m = EgressNetworkPolicyPeer{} } +func (*EgressNetworkPolicyPeer) ProtoMessage() {} +func (*EgressNetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{5} +} +func (m *EgressNetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyPeer.Merge(m, src) +} +func (m *EgressNetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyPeer proto.InternalMessageInfo + +func (m *EgressNetworkPolicyRule) Reset() { *m = EgressNetworkPolicyRule{} } +func (*EgressNetworkPolicyRule) ProtoMessage() {} +func (*EgressNetworkPolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{6} +} +func (m *EgressNetworkPolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyRule.Merge(m, src) +} +func (m *EgressNetworkPolicyRule) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyRule proto.InternalMessageInfo + +func (m *EgressNetworkPolicySpec) Reset() { *m = EgressNetworkPolicySpec{} } +func (*EgressNetworkPolicySpec) ProtoMessage() {} +func (*EgressNetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{7} +} +func (m *EgressNetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicySpec) XXX_Merge(src proto.Message) { + 
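// Merge delegates to the cached proto.InternalMessageInfo, which lazily builds and reuses reflection-based merge metadata for this message type. + 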
xxx_messageInfo_EgressNetworkPolicySpec.Merge(m, src) +} +func (m *EgressNetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicySpec proto.InternalMessageInfo + +func (m *HostSubnet) Reset() { *m = HostSubnet{} } +func (*HostSubnet) ProtoMessage() {} +func (*HostSubnet) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{8} +} +func (m *HostSubnet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostSubnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostSubnet) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSubnet.Merge(m, src) +} +func (m *HostSubnet) XXX_Size() int { + return m.Size() +} +func (m *HostSubnet) XXX_DiscardUnknown() { + xxx_messageInfo_HostSubnet.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSubnet proto.InternalMessageInfo + +func (m *HostSubnetList) Reset() { *m = HostSubnetList{} } +func (*HostSubnetList) ProtoMessage() {} +func (*HostSubnetList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{9} +} +func (m *HostSubnetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostSubnetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostSubnetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSubnetList.Merge(m, src) +} +func (m *HostSubnetList) XXX_Size() int { + return m.Size() +} +func (m *HostSubnetList) XXX_DiscardUnknown() { + xxx_messageInfo_HostSubnetList.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSubnetList proto.InternalMessageInfo + +func (m *NetNamespace) Reset() { *m = NetNamespace{} } +func (*NetNamespace) ProtoMessage() {} +func (*NetNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{10} +} +func (m *NetNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespace.Merge(m, src) +} +func (m *NetNamespace) XXX_Size() int { + return m.Size() +} +func (m *NetNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_NetNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespace proto.InternalMessageInfo + +func (m *NetNamespaceList) Reset() { *m = NetNamespaceList{} } +func (*NetNamespaceList) ProtoMessage() {} +func (*NetNamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{11} +} +func (m *NetNamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetNamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetNamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespaceList.Merge(m, src) +} +func (m *NetNamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NetNamespaceList) XXX_DiscardUnknown() { + 
xxx_messageInfo_NetNamespaceList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespaceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterNetwork)(nil), "github.com.openshift.api.network.v1.ClusterNetwork") + proto.RegisterType((*ClusterNetworkEntry)(nil), "github.com.openshift.api.network.v1.ClusterNetworkEntry") + proto.RegisterType((*ClusterNetworkList)(nil), "github.com.openshift.api.network.v1.ClusterNetworkList") + proto.RegisterType((*EgressNetworkPolicy)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicy") + proto.RegisterType((*EgressNetworkPolicyList)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyList") + proto.RegisterType((*EgressNetworkPolicyPeer)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyPeer") + proto.RegisterType((*EgressNetworkPolicyRule)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyRule") + proto.RegisterType((*EgressNetworkPolicySpec)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicySpec") + proto.RegisterType((*HostSubnet)(nil), "github.com.openshift.api.network.v1.HostSubnet") + proto.RegisterType((*HostSubnetList)(nil), "github.com.openshift.api.network.v1.HostSubnetList") + proto.RegisterType((*NetNamespace)(nil), "github.com.openshift.api.network.v1.NetNamespace") + proto.RegisterType((*NetNamespaceList)(nil), "github.com.openshift.api.network.v1.NetNamespaceList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/network/v1/generated.proto", fileDescriptor_38d1cb27735fa5d9) +} + +var fileDescriptor_38d1cb27735fa5d9 = []byte{ + // 996 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0xaf, 0xf3, 0xa7, 0x6d, 0x26, 0x6d, 0x5a, 0xcd, 0x56, 0xac, 0x29, 0x92, 0x13, 0xb9, 0x02, + 0x82, 0x56, 0xd8, 0xb4, 0x8b, 0x50, 0x0f, 0x08, 0xb4, 0x6e, 0x2b, 0x6d, 0xa4, 0x6e, 0x88, 0x26, + 0x65, 0x55, 0x21, 0x40, 0xb8, 0xce, 0xac, 0x63, 0x9a, 0xd8, 0x96, 0x67, 0x12, 0x88, 0x10, 0x7f, + 0x2e, 0xdc, 0xf9, 0x00, 0x7c, 0x0c, 0x3e, 0x02, 0x87, 0x1e, 0x38, 0xec, 0x09, 0xf6, 0x14, 0x51, + 0x73, 0xe7, 0x03, 0xf4, 0x84, 0x66, 0x3c, 0x8e, 0xed, 0xac, 0x2b, 0xa2, 0x22, 0x72, 0x4a, 0xe6, + 0xfd, 0xde, 0xdf, 0xf9, 0xbd, 0xf7, 0xc6, 0xe0, 0xa1, 0xed, 0xd0, 0xfe, 0xe8, 0x42, 0xb3, 0xbc, + 0xa1, 0xee, 0xf9, 0xd8, 0x25, 0x7d, 0xe7, 0x19, 0xd5, 0x4d, 0xdf, 0xd1, 0x5d, 0x4c, 0xbf, 0xf2, + 0x82, 0x4b, 0x7d, 0xbc, 0xaf, 0xdb, 0xd8, 0xc5, 0x81, 0x49, 0x71, 0x4f, 0xf3, 0x03, 0x8f, 0x7a, + 0x70, 0x2f, 0x31, 0xd2, 0x66, 0x46, 0x9a, 0xe9, 0x3b, 0x9a, 0x30, 0xd2, 0xc6, 0xfb, 0xbb, 0x6f, + 0xa7, 0x3c, 0xdb, 0x9e, 0xed, 0xe9, 0xdc, 0xf6, 0x62, 0xf4, 0x8c, 0x9f, 0xf8, 0x81, 0xff, 0x8b, + 0x7c, 0xee, 0xbe, 0x7b, 0x79, 0x48, 0x34, 0xc7, 0x63, 0xa1, 0x87, 0xa6, 0xd5, 0x77, 0x5c, 0x1c, + 0x4c, 0x74, 0xff, 0xd2, 0x66, 0x02, 0xa2, 0x0f, 0x31, 0x35, 0x73, 0x32, 0xd9, 0x7d, 0xef, 0x36, + 0xab, 0x60, 0xe4, 0x52, 0x67, 0x88, 0x75, 0x62, 0xf5, 0xf1, 0xd0, 0x9c, 0xb7, 0x53, 0x7f, 0x2e, + 0x81, 0xda, 0xd1, 0x60, 0x44, 0x28, 0x0e, 0xda, 0x51, 0xca, 0xf0, 0x0b, 0xb0, 0xce, 0xa2, 0xf4, + 0x4c, 0x6a, 0xca, 0x52, 0x43, 0x6a, 0x56, 0x0f, 0xde, 0xd1, 0x22, 0xef, 0x5a, 0xda, 0xbb, 0xe6, + 0x5f, 0xda, 0x4c, 0x40, 0x34, 0xa6, 0xad, 0x8d, 0xf7, 0xb5, 0x8f, 0x2e, 0xbe, 0xc4, 0x16, 0x7d, + 0x82, 0xa9, 0x69, 0xc0, 0xab, 0x69, 0x7d, 0x25, 0x9c, 0xd6, 0x41, 0x22, 0x43, 0x33, 0xaf, 0xf0, + 0x2d, 0xb0, 0x26, 0xee, 0x47, 0x2e, 0x34, 0xa4, 0x66, 0xc5, 0xd8, 0x12, 0xea, 0x6b, 0x22, 0x07, + 0x14, 0xe3, 0xf0, 0x18, 0x6c, 0xf7, 0x3d, 
0x42, 0xc9, 0xe8, 0xc2, 0xc5, 0x74, 0x80, 0x5d, 0x9b, + 0xf6, 0xe5, 0x62, 0x43, 0x6a, 0x6e, 0x1a, 0xb2, 0xb0, 0xd9, 0x7e, 0xec, 0x11, 0xda, 0xe5, 0xf8, + 0x29, 0xc7, 0xd1, 0x4b, 0x16, 0xf0, 0x03, 0x50, 0x23, 0x38, 0x18, 0x3b, 0x16, 0x16, 0x01, 0xe4, + 0x12, 0x8f, 0xfb, 0x8a, 0xf0, 0x51, 0xeb, 0x66, 0x50, 0x34, 0xa7, 0x0d, 0x0f, 0x00, 0xf0, 0x07, + 0x23, 0xdb, 0x71, 0xdb, 0xe6, 0x10, 0xcb, 0x65, 0x6e, 0x3b, 0x2b, 0xb1, 0x33, 0x43, 0x50, 0x4a, + 0x0b, 0x7e, 0x03, 0xb6, 0xac, 0xcc, 0xc5, 0x12, 0x79, 0xb5, 0x51, 0x6c, 0x56, 0x0f, 0x0e, 0xb5, + 0x05, 0xba, 0x46, 0xcb, 0x92, 0x72, 0xe2, 0xd2, 0x60, 0x62, 0xdc, 0x17, 0x21, 0xb7, 0xb2, 0x20, + 0x41, 0xf3, 0x91, 0xe0, 0x03, 0x50, 0x19, 0x7f, 0x3d, 0x30, 0xdd, 0x8e, 0x17, 0x50, 0x79, 0x8d, + 0xdf, 0xd7, 0x66, 0x38, 0xad, 0x57, 0x9e, 0x9e, 0x9f, 0x3e, 0x6a, 0x33, 0x21, 0x4a, 0x70, 0xf8, + 0x2a, 0x28, 0x0e, 0xe9, 0x48, 0x5e, 0xe7, 0x6a, 0x6b, 0xe1, 0xb4, 0x5e, 0x7c, 0x72, 0xf6, 0x31, + 0x62, 0x32, 0xf5, 0x5b, 0x70, 0x2f, 0x27, 0x11, 0xd8, 0x00, 0x25, 0xcb, 0xe9, 0x05, 0xbc, 0x3d, + 0x2a, 0xc6, 0x86, 0x48, 0xab, 0x74, 0xd4, 0x3a, 0x46, 0x88, 0x23, 0x31, 0x6f, 0x69, 0x5e, 0x38, + 0xd7, 0xff, 0xca, 0x5b, 0x5a, 0xa2, 0xfe, 0x26, 0x01, 0x98, 0x8d, 0x7f, 0xea, 0x10, 0x0a, 0x3f, + 0x7d, 0xa9, 0x43, 0xb5, 0xc5, 0x3a, 0x94, 0x59, 0xf3, 0xfe, 0xdc, 0x16, 0x49, 0xac, 0xc7, 0x92, + 0x54, 0x77, 0x9e, 0x83, 0xb2, 0x43, 0xf1, 0x90, 0xc8, 0x05, 0x4e, 0xd7, 0xc3, 0x3b, 0xd0, 0x65, + 0x6c, 0x0a, 0xff, 0xe5, 0x16, 0xf3, 0x84, 0x22, 0x87, 0xea, 0x1f, 0x12, 0xb8, 0x77, 0x62, 0x07, + 0x98, 0x10, 0xa1, 0xd7, 0xf1, 0x06, 0x8e, 0x35, 0x59, 0xc2, 0xc4, 0x7d, 0x0e, 0x4a, 0xc4, 0xc7, + 0x16, 0xa7, 0xa0, 0x7a, 0xf0, 0xfe, 0x42, 0x25, 0xe5, 0x64, 0xda, 0xf5, 0xb1, 0x95, 0xd0, 0xcd, + 0x4e, 0x88, 0xfb, 0x55, 0x7f, 0x97, 0xc0, 0xfd, 0x1c, 0xfd, 0x25, 0xb0, 0xf5, 0x59, 0x96, 0xad, + 0xc3, 0xbb, 0x96, 0x76, 0x0b, 0x65, 0xdf, 0xe5, 0xd6, 0xd5, 0xc1, 0x38, 0x80, 0x87, 0x60, 0x83, + 0xb5, 0x7a, 0x17, 0x0f, 0xb0, 0x45, 0xbd, 0x78, 0x18, 0x76, 0x84, 0x9b, 0x0d, 0x36, 0x0c, 0x31, + 0x86, 0x32, 0x9a, 0x6c, 0xff, 0xf5, 0x5c, 0xc2, 0x77, 0xc9, 0xdc, 0xfe, 0x3b, 0x6e, 0x77, 0xf9, + 0x22, 0x89, 0x71, 0xf5, 0x97, 0xfc, 0x8b, 0x45, 0xa3, 0x01, 0x86, 0x1f, 0x82, 0x12, 0x9d, 0xf8, + 0x58, 0x04, 0x7e, 0x10, 0xd3, 0x72, 0x36, 0xf1, 0xf1, 0xcd, 0xb4, 0xfe, 0xda, 0x2d, 0x66, 0x0c, + 0x46, 0xdc, 0x10, 0x9e, 0x83, 0x02, 0xf5, 0xfe, 0x6b, 0x4f, 0xb0, 0xbb, 0x30, 0x80, 0x08, 0x5e, + 0x38, 0xf3, 0x50, 0x81, 0x7a, 0xea, 0xf7, 0xb9, 0x59, 0xb3, 0x86, 0x81, 0x3d, 0xb0, 0x8a, 0x39, + 0x24, 0x4b, 0x9c, 0xb1, 0x3b, 0x07, 0x66, 0xc5, 0x18, 0x35, 0x11, 0x78, 0x35, 0x52, 0x40, 0xc2, + 0xb7, 0xfa, 0x77, 0x01, 0x80, 0x64, 0xc1, 0x2c, 0x61, 0xc2, 0x1a, 0xa0, 0xc4, 0xd6, 0x97, 0x20, + 0x74, 0x36, 0x23, 0x2c, 0x07, 0xc4, 0x11, 0xf8, 0x06, 0x58, 0x65, 0xbf, 0xad, 0x0e, 0x7f, 0xc0, + 0x2a, 0x49, 0xea, 0x8f, 0xb9, 0x14, 0x09, 0x94, 0xe9, 0x45, 0x8f, 0x97, 0x78, 0xa4, 0x66, 0x7a, + 0x51, 0x2d, 0x48, 0xa0, 0xf0, 0x11, 0xa8, 0x44, 0xc5, 0xb6, 0x3a, 0x44, 0x2e, 0x37, 0x8a, 0xcd, + 0x8a, 0xb1, 0xc7, 0x76, 0xfc, 0x49, 0x2c, 0xbc, 0x99, 0xd6, 0x61, 0x72, 0x07, 0xb1, 0x18, 0x25, + 0x56, 0xb0, 0x05, 0xaa, 0xd1, 0x81, 0x35, 0x6b, 0xf4, 0x3e, 0x55, 0x8c, 0x37, 0xc3, 0x69, 0xbd, + 0x7a, 0x92, 0x88, 0x6f, 0xa6, 0xf5, 0x9d, 0x79, 0x37, 0x7c, 0xd3, 0xa7, 0x6d, 0xd5, 0x5f, 0x25, + 0x50, 0x4b, 0x6d, 0xf4, 0xff, 0x7f, 0xf0, 0xcf, 0xb2, 0x83, 0xaf, 0x2f, 0xd4, 0x46, 0x49, 0x86, + 0xb7, 0xcc, 0xfb, 0x8f, 0x05, 0xb0, 0xd1, 0xc6, 0x94, 0xcd, 0x1e, 0xf1, 0x4d, 0x0b, 0x2f, 0xed, + 0x6b, 0xc8, 0xcd, 0xd9, 0x06, 0x22, 0x11, 0x14, 0xe3, 0x70, 0x0f, 
0x94, 0x5d, 0x4c, 0x9d, 0x9e, + 0xf8, 0x04, 0x9a, 0x95, 0xd0, 0xc6, 0xb4, 0x75, 0x8c, 0x22, 0x0c, 0x1e, 0xa5, 0xfb, 0xa2, 0xc4, + 0x29, 0x7d, 0x7d, 0xbe, 0x2f, 0x76, 0xd2, 0x35, 0xe6, 0x74, 0x86, 0x7a, 0x25, 0x81, 0xed, 0xb4, + 0xce, 0x12, 0x08, 0x7d, 0x9a, 0x25, 0x74, 0x7f, 0x21, 0x42, 0xd3, 0x39, 0xe6, 0x53, 0x6a, 0xb4, + 0xae, 0xae, 0x95, 0x95, 0xe7, 0xd7, 0xca, 0xca, 0x8b, 0x6b, 0x65, 0xe5, 0x87, 0x50, 0x91, 0xae, + 0x42, 0x45, 0x7a, 0x1e, 0x2a, 0xd2, 0x8b, 0x50, 0x91, 0xfe, 0x0c, 0x15, 0xe9, 0xa7, 0xbf, 0x94, + 0x95, 0x4f, 0xf6, 0x16, 0xf8, 0xfe, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x4d, 0xd5, 0x11, + 0x25, 0x0c, 0x00, 0x00, +} + +func (m *ClusterNetwork) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetwork) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetwork) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MTU != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MTU)) + i-- + dAtA[i] = 0x40 + } + if m.VXLANPort != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.VXLANPort)) + i-- + dAtA[i] = 0x38 + } + if len(m.ClusterNetworks) > 0 { + for iNdEx := len(m.ClusterNetworks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterNetworks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.PluginName) + copy(dAtA[i:], m.PluginName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PluginName))) + i-- + dAtA[i] = 0x2a + i -= len(m.ServiceNetwork) + copy(dAtA[i:], m.ServiceNetwork) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceNetwork))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) + i-- + dAtA[i] = 0x18 + i -= len(m.Network) + copy(dAtA[i:], m.Network) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Network))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterNetworkEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetworkEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetworkEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) + i-- + dAtA[i] = 0x10 + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterNetworkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetworkList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetworkList) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DNSName) + copy(dAtA[i:], m.DNSName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSName))) + i-- + dAtA[i] = 0x12 + i -= len(m.CIDRSelector) + copy(dAtA[i:], m.CIDRSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*EgressNetworkPolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HostSubnet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostSubnet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostSubnet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EgressCIDRs) > 0 { + for iNdEx := len(m.EgressCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressCIDRs[iNdEx]) + copy(dAtA[i:], m.EgressCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.EgressIPs) > 0 { + for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressIPs[iNdEx]) + copy(dAtA[i:], m.EgressIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Subnet) + copy(dAtA[i:], m.Subnet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subnet))) + i-- + dAtA[i] = 0x22 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x1a + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HostSubnetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostSubnetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostSubnetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 
1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetNamespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetNamespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EgressIPs) > 0 { + for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressIPs[iNdEx]) + copy(dAtA[i:], m.EgressIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NetID)) + i-- + dAtA[i] = 0x18 + i -= len(m.NetName) + copy(dAtA[i:], m.NetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NetName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetNamespaceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetNamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetNamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterNetwork) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Network) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostSubnetLength)) + l = len(m.ServiceNetwork) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PluginName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ClusterNetworks) > 0 { + for _, e := range m.ClusterNetworks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VXLANPort != nil { + n += 1 + sovGenerated(uint64(*m.VXLANPort)) + } + if m.MTU != nil { + n += 1 + sovGenerated(uint64(*m.MTU)) + } + return n +} + +func (m 
*ClusterNetworkEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostSubnetLength)) + return n +} + +func (m *ClusterNetworkList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressNetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressNetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDRSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DNSName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostSubnet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subnet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EgressIPs) > 0 { + for _, s := range m.EgressIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EgressCIDRs) > 0 { + for _, s := range m.EgressCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostSubnetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NetName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NetID)) + if len(m.EgressIPs) > 0 { + for _, s := range m.EgressIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetNamespaceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterNetwork) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForClusterNetworks := "[]ClusterNetworkEntry{" + for _, f := range this.ClusterNetworks { + repeatedStringForClusterNetworks += strings.Replace(strings.Replace(f.String(), "ClusterNetworkEntry", "ClusterNetworkEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForClusterNetworks += "}" + s := strings.Join([]string{`&ClusterNetwork{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`, + `ServiceNetwork:` + fmt.Sprintf("%v", this.ServiceNetwork) + `,`, + `PluginName:` + fmt.Sprintf("%v", this.PluginName) + `,`, + `ClusterNetworks:` + repeatedStringForClusterNetworks + `,`, + `VXLANPort:` + valueToStringGenerated(this.VXLANPort) + `,`, + `MTU:` + valueToStringGenerated(this.MTU) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterNetworkEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterNetworkEntry{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterNetworkList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterNetwork{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterNetwork", "ClusterNetwork", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterNetworkList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EgressNetworkPolicySpec", "EgressNetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EgressNetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicy", "EgressNetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EgressNetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicyPeer{`, + `CIDRSelector:` + fmt.Sprintf("%v", this.CIDRSelector) + `,`, + `DNSName:` + fmt.Sprintf("%v", this.DNSName) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicyRule{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), 
"EgressNetworkPolicyPeer", "EgressNetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForEgress := "[]EgressNetworkPolicyRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicyRule", "EgressNetworkPolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" + s := strings.Join([]string{`&EgressNetworkPolicySpec{`, + `Egress:` + repeatedStringForEgress + `,`, + `}`, + }, "") + return s +} +func (this *HostSubnet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostSubnet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`, + `EgressCIDRs:` + fmt.Sprintf("%v", this.EgressCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *HostSubnetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HostSubnet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HostSubnet", "HostSubnet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HostSubnetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NetNamespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetNamespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `NetName:` + fmt.Sprintf("%v", this.NetName) + `,`, + `NetID:` + fmt.Sprintf("%v", this.NetID) + `,`, + `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`, + `}`, + }, "") + return s +} +func (this *NetNamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NetNamespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetNamespace", "NetNamespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetNamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterNetwork) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetwork: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetwork: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType) + } + m.HostSubnetLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostSubnetLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceNetwork", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceNetwork = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PluginName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PluginName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterNetworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterNetworks = append(m.ClusterNetworks, ClusterNetworkEntry{}) + if err := m.ClusterNetworks[len(m.ClusterNetworks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VXLANPort", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.VXLANPort = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MTU", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MTU = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterNetworkEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetworkEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetworkEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType) + } + m.HostSubnetLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostSubnetLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err 
+ } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterNetworkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetworkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetworkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterNetwork{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, 
EgressNetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDRSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDRSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: EgressNetworkPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = EgressNetworkPolicyRuleType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, EgressNetworkPolicyRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostSubnet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostSubnet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostSubnet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressIPs = append(m.EgressIPs, HostSubnetEgressIP(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressCIDRs = append(m.EgressCIDRs, HostSubnetEgressCIDR(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostSubnetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostSubnetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostSubnetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HostSubnet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetID", wireType) + } + m.NetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NetID |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressIPs = append(m.EgressIPs, NetNamespaceEgressIP(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetNamespaceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetNamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetNamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetNamespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/network/v1/generated.proto b/vendor/github.com/openshift/api/network/v1/generated.proto new file mode 100644 index 0000000000000..4fc68a9740214 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/generated.proto @@ -0,0 +1,255 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.network.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/network/v1"; + +// ClusterNetwork was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusternetworks,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=001 +// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=.network,description="The primary cluster network CIDR" +// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=.serviceNetwork,description="The service network CIDR" +// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=.pluginName,description="The OpenShift SDN network plug-in in use" +// +openshift:compatibility-gen:level=1 +message ClusterNetwork { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // network is a CIDR string specifying the global overlay network's L3 space + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string network = 2; + + // hostsubnetlength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + optional uint32 hostsubnetlength = 3; + + // serviceNetwork is the CIDR range that Service IP addresses are allocated from + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string serviceNetwork = 4; + + // pluginName is the name of the network plugin being used + optional string pluginName = 5; + + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + repeated ClusterNetworkEntry clusterNetworks = 6; + + // vxlanPort sets the VXLAN destination port used by the cluster. + // It is set by the master configuration file on startup and cannot be edited manually. + // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. + // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + optional uint32 vxlanPort = 7; + + // mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // +kubebuilder:validation:Minimum=576 + // +kubebuilder:validation:Maximum=65536 + // +optional + optional uint32 mtu = 8; +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +message ClusterNetworkEntry { + // CIDR defines the total range of a cluster networks address space. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string cidr = 1; + + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + optional uint32 hostSubnetLength = 2; +} + +// ClusterNetworkList is a collection of ClusterNetworks +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterNetworkList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of cluster networks + repeated ClusterNetwork items = 2; +} + +// EgressNetworkPolicy was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
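The hostsubnetlength/hostSubnetLength comments above describe the per-node slicing only in words; here is a minimal runnable sketch of the arithmetic, using a hypothetical /14 cluster network and 9 host bits (both illustrative values, not taken from this API):

package main

import "fmt"

// Each node receives a slice of the cluster network whose prefix length is
// 32 minus hostSubnetLength, so 8 host bits yield a /24 per node, as the
// field documentation says.
func main() {
	clusterPrefix := 14   // e.g. a 10.128.0.0/14 cluster network (assumed example)
	hostSubnetLength := 9 // host bits allocated to each node

	nodePrefix := 32 - hostSubnetLength              // /23 per node
	nodeSubnets := 1 << (nodePrefix - clusterPrefix) // 2^(23-14) = 512 node slices
	podIPsPerNode := 1 << hostSubnetLength           // 512 addresses per slice

	fmt.Printf("each node gets a /%d (%d addresses); the /%d network holds %d such slices\n",
		nodePrefix, podIPsPerNode, clusterPrefix, nodeSubnets)
}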
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=egressnetworkpolicies,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=004 +// +openshift:compatibility-gen:level=1 +message EgressNetworkPolicy { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of the current egress network policy + optional EgressNetworkPolicySpec spec = 2; +} + +// EgressNetworkPolicyList is a collection of EgressNetworkPolicy +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message EgressNetworkPolicyList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of policies + repeated EgressNetworkPolicy items = 2; +} + +// EgressNetworkPolicyPeer specifies a target to apply egress network policy to +message EgressNetworkPolicyPeer { + // cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // Ideally we would have liked to use the cidr openapi format for this property. + // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs + // We are therefore using a regex pattern to validate instead. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string cidrSelector = 1; + + // dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` + optional string dnsName = 2; +} + +// EgressNetworkPolicyRule contains a single egress network policy rule +message EgressNetworkPolicyRule { + // type marks this as an "Allow" or "Deny" rule + optional string type = 1; + + // to is the target that traffic is allowed/denied to + optional EgressNetworkPolicyPeer to = 2; +} + +// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic +message EgressNetworkPolicySpec { + // egress contains the list of egress policy rules + repeated EgressNetworkPolicyRule egress = 1; +} + +// HostSubnet was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
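The peer/rule/spec messages above compose in the obvious way; a hedged sketch of building an EgressNetworkPolicy from these vendored Go types (the package alias, object names, and the literal "Allow"/"Deny" strings are assumptions drawn from the field comments, since any exported rule-type constants are not shown in this patch):

package main

import (
	"fmt"

	networkv1 "github.com/openshift/api/network/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One "Allow" rule for a DNS name, then a catch-all "Deny" for 0.0.0.0/0.
	// Each peer sets exactly one of dnsName/cidrSelector, as documented above.
	policy := networkv1.EgressNetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "example"}, // illustrative
		Spec: networkv1.EgressNetworkPolicySpec{
			Egress: []networkv1.EgressNetworkPolicyRule{
				{
					Type: networkv1.EgressNetworkPolicyRuleType("Allow"),
					To:   networkv1.EgressNetworkPolicyPeer{DNSName: "example.com"},
				},
				{
					Type: networkv1.EgressNetworkPolicyRuleType("Deny"),
					To:   networkv1.EgressNetworkPolicyPeer{CIDRSelector: "0.0.0.0/0"},
				},
			},
		},
	}
	fmt.Printf("%d egress rules; first: %s -> %s\n",
		len(policy.Spec.Egress), policy.Spec.Egress[0].Type, policy.Spec.Egress[0].To.DNSName)
}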
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=hostsubnets,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=002 +// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=.host,description="The name of the node" +// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=.hostIP,description="The IP address to be used as a VTEP by other nodes in the overlay network" +// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=.subnet,description="The CIDR range of the overlay network assigned to the node for its pods" +// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=.egressCIDRs,description="The network egress CIDRs" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +message HostSubnet { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + optional string host = 2; + + // hostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` + optional string hostIP = 3; + + // subnet is the CIDR range of the overlay network assigned to the node for its pods + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string subnet = 4; + + // egressIPs is the list of automatic egress IP addresses currently hosted by this node. + // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the + // master will overwrite the value here with its own allocation of egress IPs. + // +optional + repeated string egressIPs = 5; + + // egressCIDRs is the list of CIDR ranges available for automatically assigning + // egress IPs to this node from. If this field is set then EgressIPs should be + // treated as read-only. + // +optional + repeated string egressCIDRs = 6; +} + +// HostSubnetList is a collection of HostSubnets +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message HostSubnetList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of host subnets + repeated HostSubnet items = 2; +} + +// NetNamespace was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
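A hedged sketch of the egressIPs/egressCIDRs interplay documented above: when egressCIDRs is set, egressIPs is allocated by the master and should be treated as read-only, so automatic-mode objects leave it empty. All addresses below are illustrative; the HostSubnetEgressIP and HostSubnetEgressCIDR string types appear in the generated code earlier in this patch.

package main

import (
	"fmt"

	networkv1 "github.com/openshift/api/network/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Manual mode: egressCIDRs empty, egressIPs set by hand.
	manual := networkv1.HostSubnet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
		Host:       "node-1", // must match metadata.name per the comment above
		HostIP:     "192.0.2.10",
		Subnet:     "10.128.0.0/23",
		EgressIPs:  []networkv1.HostSubnetEgressIP{"192.0.2.100"},
	}
	// Automatic mode: egressCIDRs set, the master fills egressIPs.
	automatic := networkv1.HostSubnet{
		ObjectMeta:  metav1.ObjectMeta{Name: "node-2"},
		Host:        "node-2",
		HostIP:      "192.0.2.11",
		Subnet:      "10.128.2.0/23",
		EgressCIDRs: []networkv1.HostSubnetEgressCIDR{"192.0.2.0/24"},
	}
	fmt.Println(manual.Name, automatic.Name)
}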
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=netnamespaces,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=003 +// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=.netid,description="The network identifier of the network namespace" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +message NetNamespace { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + optional string netname = 2; + + // netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=16777215 + optional uint32 netid = 3; + + // egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // (If empty, external traffic will be masqueraded to Node IPs.) + // +optional + repeated string egressIPs = 4; +} + +// NetNamespaceList is a collection of NetNamespaces +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message NetNamespaceList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of net namespaces + repeated NetNamespace items = 2; +} + diff --git a/vendor/github.com/openshift/api/network/v1/legacy.go b/vendor/github.com/openshift/api/network/v1/legacy.go new file mode 100644 index 0000000000000..4395ebf8e56da --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/legacy.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &ClusterNetwork{}, + &ClusterNetworkList{}, + &HostSubnet{}, + &HostSubnetList{}, + &NetNamespace{}, + &NetNamespaceList{}, + &EgressNetworkPolicy{}, + &EgressNetworkPolicyList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
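Similarly for NetNamespace, a brief hedged sketch: netid is a 24-bit VNID (the 0..16777215 bounds above), netname must mirror the object name, and the NetNamespaceEgressIP string type comes from the generated code earlier in this patch. Values are illustrative.

package main

import (
	"fmt"

	networkv1 "github.com/openshift/api/network/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	nn := networkv1.NetNamespace{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		NetName:    "example", // must match metadata.name per the comment above
		NetID:      42,        // any value in the validated 0..16777215 range
		EgressIPs:  []networkv1.NetNamespaceEgressIP{"192.0.2.50"}, // else traffic masquerades to node IPs
	}
	fmt.Println(nn.NetName, nn.NetID)
}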
+ return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/register.go b/vendor/github.com/openshift/api/network/v1/register.go new file mode 100644 index 0000000000000..80defa76427c7 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterNetwork{}, + &ClusterNetworkList{}, + &HostSubnet{}, + &HostSubnetList{}, + &NetNamespace{}, + &NetNamespaceList{}, + &EgressNetworkPolicy{}, + &EgressNetworkPolicyList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/types.go b/vendor/github.com/openshift/api/network/v1/types.go new file mode 100644 index 0000000000000..7790802138ca5 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/types.go @@ -0,0 +1,312 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ClusterNetworkDefault = "default" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterNetwork was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusternetworks,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=001 +// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=.network,description="The primary cluster network CIDR" +// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=.serviceNetwork,description="The service network CIDR" +// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=.pluginName,description="The OpenShift SDN network plug-in in use" +// +openshift:compatibility-gen:level=1 +type ClusterNetwork struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // network is a CIDR string specifying the global overlay network's L3 space + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"` + + // hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"` + + // serviceNetwork is the CIDR range that Service IP addresses are allocated from + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"` + + // pluginName is the name of the network plugin being used + PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"` + + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"` + + // vxlanPort sets the VXLAN destination port used by the cluster. + // It is set by the master configuration file on startup and cannot be edited manually. + // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. + // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"` + + // mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // +kubebuilder:validation:Minimum=576 + // +kubebuilder:validation:Maximum=65536 + // +optional + MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"` +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +type ClusterNetworkEntry struct { + // CIDR defines the total range of a cluster networks address space. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"` + + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. 
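+	// A worked sketch of the allocation math, assuming entry is a
+	// ClusterNetworkEntry (illustrative only):
+	//
+	//	hostPrefix := 32 - int(entry.HostSubnetLength) // 8 bits -> /24 per node
+	//	podAddrs := 1 << entry.HostSubnetLength        // 8 bits -> 256 addresses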
+ // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterNetworkList is a collection of ClusterNetworks +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterNetworkList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of cluster networks + Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by +// HostSubnet +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` +type HostSubnetEgressIP string + +// HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node +// represented by the HostSubnet +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` +type HostSubnetEgressCIDR string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostSubnet was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=hostsubnets,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=002 +// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=.host,description="The name of the node" +// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=.hostIP,description="The IP address to be used as a VTEP by other nodes in the overlay network" +// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=.subnet,description="The CIDR range of the overlay network assigned to the node for its pods" +// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=.egressCIDRs,description="The network egress CIDRs" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +type HostSubnet struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // host is the name of the node. (This is the same as the object's name, but both fields must be set.) 
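+	// A minimal literal showing how the duplicated name/host fields line up
+	// (a sketch with made-up values):
+	//
+	//	hs := HostSubnet{
+	//		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
+	//		Host:       "node-1", // must equal metadata.name
+	//		HostIP:     "10.0.0.5",
+	//		Subnet:     "10.128.2.0/23",
+	//	}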
+ // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + Host string `json:"host" protobuf:"bytes,2,opt,name=host"` + + // hostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` + HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"` + + // subnet is the CIDR range of the overlay network assigned to the node for its pods + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"` + + // egressIPs is the list of automatic egress IP addresses currently hosted by this node. + // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the + // master will overwrite the value here with its own allocation of egress IPs. + // +optional + EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"` + + // egressCIDRs is the list of CIDR ranges available for automatically assigning + // egress IPs to this node from. If this field is set then EgressIPs should be + // treated as read-only. + // +optional + EgressCIDRs []HostSubnetEgressCIDR `json:"egressCIDRs,omitempty" protobuf:"bytes,6,rep,name=egressCIDRs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostSubnetList is a collection of HostSubnets +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type HostSubnetList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of host subnets + Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming +// from pods in this namespace +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` +type NetNamespaceEgressIP string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetNamespace was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=netnamespaces,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=003 +// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=.netid,description="The network identifier of the network namespace" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +type NetNamespace struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
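+	// The netid ceiling below is the 24-bit VXLAN VNID limit:
+	//
+	//	const maxVNID = 1<<24 - 1 // 16777215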
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"` + + // netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=16777215 + NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"` + + // egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // (If empty, external traffic will be masqueraded to Node IPs.) + // +optional + EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetNamespaceList is a collection of NetNamespaces +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NetNamespaceList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of net namespaces + Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// EgressNetworkPolicyRuleType indicates whether an EgressNetworkPolicyRule allows or denies traffic +// +kubebuilder:validation:Pattern=`^Allow|Deny$` +type EgressNetworkPolicyRuleType string + +const ( + EgressNetworkPolicyRuleAllow EgressNetworkPolicyRuleType = "Allow" + EgressNetworkPolicyRuleDeny EgressNetworkPolicyRuleType = "Deny" +) + +// EgressNetworkPolicyPeer specifies a target to apply egress network policy to +type EgressNetworkPolicyPeer struct { + // cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // Ideally we would have liked to use the cidr openapi format for this property. + // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs + // We are therefore using a regex pattern to validate instead. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"` + // dnsName is the domain name to allow/deny traffic to. 
If this is set, cidrSelector must be unset + // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` + DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"` +} + +// EgressNetworkPolicyRule contains a single egress network policy rule +type EgressNetworkPolicyRule struct { + // type marks this as an "Allow" or "Deny" rule + Type EgressNetworkPolicyRuleType `json:"type" protobuf:"bytes,1,rep,name=type"` + // to is the target that traffic is allowed/denied to + To EgressNetworkPolicyPeer `json:"to" protobuf:"bytes,2,rep,name=to"` +} + +// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic +type EgressNetworkPolicySpec struct { + // egress contains the list of egress policy rules + Egress []EgressNetworkPolicyRule `json:"egress" protobuf:"bytes,1,rep,name=egress"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressNetworkPolicy was used by OpenShift SDN. +// DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in +// any way by OpenShift. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=egressnetworkpolicies,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527 +// +openshift:file-pattern=operatorOrdering=004 +// +openshift:compatibility-gen:level=1 +type EgressNetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of the current egress network policy + Spec EgressNetworkPolicySpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressNetworkPolicyList is a collection of EgressNetworkPolicy +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type EgressNetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of policies + Items []EgressNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..ab6eb72aae509 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go @@ -0,0 +1,347 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
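+// The pointer fields (vxlanPort, mtu) are reallocated below so copies never
+// alias their source; a sketch of the behavior this guarantees:
+//
+//	port := uint32(4789)
+//	orig := ClusterNetwork{VXLANPort: &port}
+//	cp := orig.DeepCopy()
+//	*cp.VXLANPort = 8472 // orig.VXLANPort is still 4789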
+func (in *ClusterNetwork) DeepCopyInto(out *ClusterNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetwork. +func (in *ClusterNetwork) DeepCopy() *ClusterNetwork { + if in == nil { + return nil + } + out := new(ClusterNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkList) DeepCopyInto(out *ClusterNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkList. +func (in *ClusterNetworkList) DeepCopy() *ClusterNetworkList { + if in == nil { + return nil + } + out := new(ClusterNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicy) DeepCopyInto(out *EgressNetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicy. +func (in *EgressNetworkPolicy) DeepCopy() *EgressNetworkPolicy { + if in == nil { + return nil + } + out := new(EgressNetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressNetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
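+// Two slice idioms appear in this file: plain copy() when the element type is
+// pointer-free (ClusterNetworkEntry above), and a per-element DeepCopyInto
+// loop when elements embed ObjectMeta, as for the Items slices. Sketch:
+//
+//	src := []ClusterNetworkEntry{{CIDR: "10.128.0.0/14", HostSubnetLength: 9}}
+//	dst := make([]ClusterNetworkEntry, len(src))
+//	copy(dst, src) // safe: no shared pointers remain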
+func (in *EgressNetworkPolicyList) DeepCopyInto(out *EgressNetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EgressNetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyList. +func (in *EgressNetworkPolicyList) DeepCopy() *EgressNetworkPolicyList { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressNetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyPeer) DeepCopyInto(out *EgressNetworkPolicyPeer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyPeer. +func (in *EgressNetworkPolicyPeer) DeepCopy() *EgressNetworkPolicyPeer { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyRule) DeepCopyInto(out *EgressNetworkPolicyRule) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyRule. +func (in *EgressNetworkPolicyRule) DeepCopy() *EgressNetworkPolicyRule { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicySpec) DeepCopyInto(out *EgressNetworkPolicySpec) { + *out = *in + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressNetworkPolicyRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicySpec. +func (in *EgressNetworkPolicySpec) DeepCopy() *EgressNetworkPolicySpec { + if in == nil { + return nil + } + out := new(EgressNetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSubnet) DeepCopyInto(out *HostSubnet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EgressIPs != nil { + in, out := &in.EgressIPs, &out.EgressIPs + *out = make([]HostSubnetEgressIP, len(*in)) + copy(*out, *in) + } + if in.EgressCIDRs != nil { + in, out := &in.EgressCIDRs, &out.EgressCIDRs + *out = make([]HostSubnetEgressCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnet. +func (in *HostSubnet) DeepCopy() *HostSubnet { + if in == nil { + return nil + } + out := new(HostSubnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
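+// DeepCopyObject is what lets generic apimachinery code clone these types
+// through the runtime.Object interface; a sketch:
+//
+//	var obj runtime.Object = &HostSubnet{Host: "node-1"}
+//	clone := obj.DeepCopyObject().(*HostSubnet)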
+func (in *HostSubnet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSubnetList) DeepCopyInto(out *HostSubnetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostSubnet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnetList. +func (in *HostSubnetList) DeepCopy() *HostSubnetList { + if in == nil { + return nil + } + out := new(HostSubnetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostSubnetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetNamespace) DeepCopyInto(out *NetNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EgressIPs != nil { + in, out := &in.EgressIPs, &out.EgressIPs + *out = make([]NetNamespaceEgressIP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespace. +func (in *NetNamespace) DeepCopy() *NetNamespace { + if in == nil { + return nil + } + out := new(NetNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetNamespaceList) DeepCopyInto(out *NetNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespaceList. +func (in *NetNamespaceList) DeepCopy() *NetNamespaceList { + if in == nil { + return nil + } + out := new(NetNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NetNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/network/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..2f32210d28dec --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,126 @@ +clusternetworks.network.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/527 + CRDName: clusternetworks.network.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "001" + FilenameRunLevel: "" + GroupName: network.openshift.io + HasStatus: false + KindName: ClusterNetwork + Labels: {} + PluralName: clusternetworks + PrinterColumns: + - description: The primary cluster network CIDR + jsonPath: .network + name: Cluster Network + type: string + - description: The service network CIDR + jsonPath: .serviceNetwork + name: Service Network + type: string + - description: The OpenShift SDN network plug-in in use + jsonPath: .pluginName + name: Plugin Name + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +egressnetworkpolicies.network.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/527 + CRDName: egressnetworkpolicies.network.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "004" + FilenameRunLevel: "" + GroupName: network.openshift.io + HasStatus: false + KindName: EgressNetworkPolicy + Labels: {} + PluralName: egressnetworkpolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +hostsubnets.network.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/527 + CRDName: hostsubnets.network.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "002" + FilenameRunLevel: "" + GroupName: network.openshift.io + HasStatus: false + KindName: HostSubnet + Labels: {} + PluralName: hostsubnets + PrinterColumns: + - description: The name of the node + jsonPath: .host + name: Host + type: string + - description: The IP address to be used as a VTEP by other nodes in the overlay + network + jsonPath: .hostIP + name: Host IP + type: string + - description: The CIDR range of the overlay network assigned to the node for its + pods + jsonPath: .subnet + name: Subnet + type: string + - description: The network egress CIDRs + jsonPath: .egressCIDRs + name: Egress CIDRs + type: string + - description: The network egress IP addresses + jsonPath: .egressIPs + name: Egress IPs + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +netnamespaces.network.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/527 + CRDName: netnamespaces.network.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "003" + FilenameRunLevel: "" + GroupName: network.openshift.io + HasStatus: false + KindName: NetNamespace + Labels: {} + PluralName: netnamespaces + PrinterColumns: + - description: The network identifier of the network namespace + jsonPath: .netid + name: NetID + type: integer + - 
description: The network egress IP addresses + jsonPath: .egressIPs + name: Egress IPs + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..a0e1240962a1e --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,145 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterNetwork = map[string]string{ + "": "ClusterNetwork was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "network": "network is a CIDR string specifying the global overlay network's L3 space", + "hostsubnetlength": "hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", + "serviceNetwork": "serviceNetwork is the CIDR range that Service IP addresses are allocated from", + "pluginName": "pluginName is the name of the network plugin being used", + "clusterNetworks": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", + "vxlanPort": "vxlanPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", + "mtu": "mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", +} + +func (ClusterNetwork) SwaggerDoc() map[string]string { + return map_ClusterNetwork +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "CIDR": "CIDR defines the total range of a cluster networks address space.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ClusterNetworkList = map[string]string{ + "": "ClusterNetworkList is a collection of ClusterNetworks\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of cluster networks", +} + +func (ClusterNetworkList) SwaggerDoc() map[string]string { + return map_ClusterNetworkList +} + +var map_EgressNetworkPolicy = map[string]string{ + "": "EgressNetworkPolicy was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the current egress network policy", +} + +func (EgressNetworkPolicy) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicy +} + +var map_EgressNetworkPolicyList = map[string]string{ + "": "EgressNetworkPolicyList is a collection of EgressNetworkPolicy\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of policies", +} + +func (EgressNetworkPolicyList) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyList +} + +var map_EgressNetworkPolicyPeer = map[string]string{ + "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to", + "cidrSelector": "cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", + "dnsName": "dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", +} + +func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyPeer +} + +var map_EgressNetworkPolicyRule = map[string]string{ + "": "EgressNetworkPolicyRule contains a single egress network policy rule", + "type": "type marks this as an \"Allow\" or \"Deny\" rule", + "to": "to is the target that traffic is allowed/denied to", +} + +func (EgressNetworkPolicyRule) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyRule +} + +var map_EgressNetworkPolicySpec = map[string]string{ + "": "EgressNetworkPolicySpec provides a list of policies on outgoing network traffic", + "egress": "egress contains the list of egress policy rules", +} + +func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicySpec +} + +var map_HostSubnet = map[string]string{ + "": "HostSubnet was used by OpenShift SDN. 
DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "host": "host is the name of the node. (This is the same as the object's name, but both fields must be set.)", + "hostIP": "hostIP is the IP address to be used as a VTEP by other nodes in the overlay network", + "subnet": "subnet is the CIDR range of the overlay network assigned to the node for its pods", + "egressIPs": "egressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", + "egressCIDRs": "egressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", +} + +func (HostSubnet) SwaggerDoc() map[string]string { + return map_HostSubnet +} + +var map_HostSubnetList = map[string]string{ + "": "HostSubnetList is a collection of HostSubnets\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of host subnets", +} + +func (HostSubnetList) SwaggerDoc() map[string]string { + return map_HostSubnetList +} + +var map_NetNamespace = map[string]string{ + "": "NetNamespace was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "netname": "netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", + "netid": "netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", + "egressIPs": "egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", +} + +func (NetNamespace) SwaggerDoc() map[string]string { + return map_NetNamespace +} + +var map_NetNamespaceList = map[string]string{ + "": "NetNamespaceList is a collection of NetNamespaces\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of net namespaces", +} + +func (NetNamespaceList) SwaggerDoc() map[string]string { + return map_NetNamespaceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/network/v1alpha1/Makefile b/vendor/github.com/openshift/api/network/v1alpha1/Makefile new file mode 100644 index 0000000000000..376fee2dc012e --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/doc.go b/vendor/github.com/openshift/api/network/v1alpha1/doc.go new file mode 100644 index 0000000000000..35539c458cd9a --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=network.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/register.go b/vendor/github.com/openshift/api/network/v1alpha1/register.go new file mode 100644 index 0000000000000..6d80c234ba58a --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &DNSNameResolver{}, + &DNSNameResolverList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go new file mode 100644 index 0000000000000..cd0d1b31a5f4d --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go @@ -0,0 +1,142 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=dnsnameresolvers,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1524 +// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=dns,operatorOrdering=00 +// +openshift:compatibility-gen:level=4 +// +openshift:enable:FeatureGate=DNSNameResolver + +// DNSNameResolver stores the DNS name resolution 
information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. +// It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type DNSNameResolver struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the DNSNameResolver. + // +required + Spec DNSNameResolverSpec `json:"spec"` + // status is the most recently observed status of the DNSNameResolver. + // +optional + Status DNSNameResolverStatus `json:"status,omitempty"` +} + +// DNSName is used for validation of a DNS name. +// +kubebuilder:validation:Pattern=`^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$` +// +kubebuilder:validation:MaxLength=254 +type DNSName string + +// DNSNameResolverSpec is a desired state description of DNSNameResolver. +type DNSNameResolverSpec struct { + // name is the DNS name for which the DNS name resolution information will be stored. + // For a regular DNS name, only the DNS name resolution information of the regular DNS + // name will be stored. For a wildcard DNS name, the DNS name resolution information + // of all the DNS names that match the wildcard DNS name will be stored. + // For a wildcard DNS name, the '*' will match only one label. Additionally, only a single + // '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' + // will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + // +required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec.name is immutable" + Name DNSName `json:"name"` +} + +// DNSNameResolverStatus defines the observed status of DNSNameResolver. +type DNSNameResolverStatus struct { + // resolvedNames contains a list of matching DNS names and their corresponding IP addresses + // along with their TTL and last DNS lookup times. + // +listType=map + // +listMapKey=dnsName + // +patchMergeKey=dnsName + // +patchStrategy=merge + // +optional + ResolvedNames []DNSNameResolverResolvedName `json:"resolvedNames,omitempty" patchStrategy:"merge" patchMergeKey:"dnsName"` +} + +// DNSNameResolverResolvedName describes the details of a resolved DNS name. +type DNSNameResolverResolvedName struct { + // conditions provide information about the state of the DNS name. + // Known .status.conditions.type is: "Degraded". + // "Degraded" is true when the last resolution failed for the DNS name, + // and false otherwise. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can + // store both regular and wildcard DNS names which match the spec.name field. When the spec.name + // field contains a regular DNS name, this field will store the same regular DNS name after it is + // successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName + // will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. 
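+	// A sketch of the one-label wildcard rule, with a hypothetical helper
+	// matchesWildcard (illustrative only):
+	//
+	//	// assumes well-formed, dot-terminated names with non-empty labels
+	//	func matchesWildcard(wildcard, name string) bool {
+	//		rest, ok := strings.CutSuffix(name, strings.TrimPrefix(wildcard, "*."))
+	//		return ok && strings.Count(rest, ".") == 1
+	//	}
+	//
+	// "*.example.com." matches "sub1.example.com." but not
+	// "sub2.sub1.example.com.".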
+ // If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard + // DNS name as well. + // +required + DNSName DNSName `json:"dnsName"` + + // resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last + // lookup times for the dnsName. + // +required + // +listType=map + // +listMapKey=ip + ResolvedAddresses []DNSNameResolverResolvedAddress `json:"resolvedAddresses"` + + // resolutionFailures keeps the count of how many consecutive times the DNS resolution failed + // for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon + // every failure, the value of the field will be incremented by one. The details about the DNS + // name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the + // associated IP addresses have expired. + ResolutionFailures int32 `json:"resolutionFailures,omitempty"` +} + +// DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. +type DNSNameResolverResolvedAddress struct { + // ip is an IP address associated with the dnsName. The validity of the IP address expires after + // lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon + // the expiration of the IP address's validity. If the information is not refreshed then it will + // be removed with a grace period after the expiration of the IP address's validity. + // +required + IP string `json:"ip"` + + // ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after + // lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with + // the current time-to-live value. If the information is not refreshed then it will be removed with a + // grace period after the expiration of the IP address's validity. + // +required + TTLSeconds int32 `json:"ttlSeconds"` + + // lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of + // the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to + // the current time on a successful DNS lookup. If the information is not refreshed then it will be + // removed with a grace period after the expiration of the IP address's validity. + // +required + LastLookupTime *metav1.Time `json:"lastLookupTime"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=4 + +// DNSNameResolverList contains a list of DNSNameResolvers. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type DNSNameResolverList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items gives the list of DNSNameResolvers. 
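+	// Validity math per the resolvedAddresses docs above (a sketch, assuming
+	// addr is a DNSNameResolverResolvedAddress):
+	//
+	//	expiry := addr.LastLookupTime.Add(time.Duration(addr.TTLSeconds) * time.Second)
+	//	expired := time.Now().After(expiry)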
+ Items []DNSNameResolver `json:"items"` +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..b8308c3f8363a --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,161 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolver) DeepCopyInto(out *DNSNameResolver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolver. +func (in *DNSNameResolver) DeepCopy() *DNSNameResolver { + if in == nil { + return nil + } + out := new(DNSNameResolver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSNameResolver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverList) DeepCopyInto(out *DNSNameResolverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSNameResolver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverList. +func (in *DNSNameResolverList) DeepCopy() *DNSNameResolverList { + if in == nil { + return nil + } + out := new(DNSNameResolverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSNameResolverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverResolvedAddress) DeepCopyInto(out *DNSNameResolverResolvedAddress) { + *out = *in + if in.LastLookupTime != nil { + in, out := &in.LastLookupTime, &out.LastLookupTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedAddress. +func (in *DNSNameResolverResolvedAddress) DeepCopy() *DNSNameResolverResolvedAddress { + if in == nil { + return nil + } + out := new(DNSNameResolverResolvedAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
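+// LastLookupTime above is cloned via metav1.Time's own DeepCopy, the standard
+// pattern for apimachinery types that provide one; the long-hand equivalent
+// would be (sketch):
+//
+//	if in.LastLookupTime != nil {
+//		t := *in.LastLookupTime
+//		out.LastLookupTime = &t
+//	}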
+func (in *DNSNameResolverResolvedName) DeepCopyInto(out *DNSNameResolverResolvedName) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResolvedAddresses != nil { + in, out := &in.ResolvedAddresses, &out.ResolvedAddresses + *out = make([]DNSNameResolverResolvedAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedName. +func (in *DNSNameResolverResolvedName) DeepCopy() *DNSNameResolverResolvedName { + if in == nil { + return nil + } + out := new(DNSNameResolverResolvedName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverSpec) DeepCopyInto(out *DNSNameResolverSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverSpec. +func (in *DNSNameResolverSpec) DeepCopy() *DNSNameResolverSpec { + if in == nil { + return nil + } + out := new(DNSNameResolverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverStatus) DeepCopyInto(out *DNSNameResolverStatus) { + *out = *in + if in.ResolvedNames != nil { + in, out := &in.ResolvedNames, &out.ResolvedNames + *out = make([]DNSNameResolverResolvedName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverStatus. +func (in *DNSNameResolverStatus) DeepCopy() *DNSNameResolverStatus { + if in == nil { + return nil + } + out := new(DNSNameResolverStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..0070eb584eecd --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,23 @@ +dnsnameresolvers.network.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1524 + CRDName: dnsnameresolvers.network.openshift.io + Capability: "" + Category: "" + FeatureGates: + - DNSNameResolver + FilenameOperatorName: dns + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_70" + GroupName: network.openshift.io + HasStatus: true + KindName: DNSNameResolver + Labels: {} + PluralName: dnsnameresolvers + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - DNSNameResolver + Version: v1alpha1 + diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..e5018a9736e1f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,76 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DNSNameResolver = map[string]string{ + "": "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the DNSNameResolver.", + "status": "status is the most recently observed status of the DNSNameResolver.", +} + +func (DNSNameResolver) SwaggerDoc() map[string]string { + return map_DNSNameResolver +} + +var map_DNSNameResolverList = map[string]string{ + "": "DNSNameResolverList contains a list of DNSNameResolvers.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items gives the list of DNSNameResolvers.", +} + +func (DNSNameResolverList) SwaggerDoc() map[string]string { + return map_DNSNameResolverList +} + +var map_DNSNameResolverResolvedAddress = map[string]string{ + "": "DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name.", + "ip": "ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", + "ttlSeconds": "ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", + "lastLookupTime": "lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. 
If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", +} + +func (DNSNameResolverResolvedAddress) SwaggerDoc() map[string]string { + return map_DNSNameResolverResolvedAddress +} + +var map_DNSNameResolverResolvedName = map[string]string{ + "": "DNSNameResolverResolvedName describes the details of a resolved DNS name.", + "conditions": "conditions provide information about the state of the DNS name. Known .status.conditions.type is: \"Degraded\". \"Degraded\" is true when the last resolution failed for the DNS name, and false otherwise.", + "dnsName": "dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well.", + "resolvedAddresses": "resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName.", + "resolutionFailures": "resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired.", +} + +func (DNSNameResolverResolvedName) SwaggerDoc() map[string]string { + return map_DNSNameResolverResolvedName +} + +var map_DNSNameResolverSpec = map[string]string{ + "": "DNSNameResolverSpec is a desired state description of DNSNameResolver.", + "name": "name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' 
but won't match 'sub2.sub1.example.com.'", +} + +func (DNSNameResolverSpec) SwaggerDoc() map[string]string { + return map_DNSNameResolverSpec +} + +var map_DNSNameResolverStatus = map[string]string{ + "": "DNSNameResolverStatus defines the observed status of DNSNameResolver.", + "resolvedNames": "resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times.", +} + +func (DNSNameResolverStatus) SwaggerDoc() map[string]string { + return map_DNSNameResolverStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/oauth/v1/doc.go b/vendor/github.com/openshift/api/oauth/v1/doc.go new file mode 100644 index 0000000000000..cae9e70d4a536 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/oauth/apis/oauth +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=oauth.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.pb.go b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go new file mode 100644 index 0000000000000..a79c46802051a --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go @@ -0,0 +1,4624 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/oauth/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterRoleScopeRestriction) Reset() { *m = ClusterRoleScopeRestriction{} } +func (*ClusterRoleScopeRestriction) ProtoMessage() {} +func (*ClusterRoleScopeRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{0} +} +func (m *ClusterRoleScopeRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleScopeRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleScopeRestriction.Merge(m, src) +} +func (m *ClusterRoleScopeRestriction) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleScopeRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleScopeRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleScopeRestriction proto.InternalMessageInfo + +func (m *OAuthAccessToken) Reset() { *m = OAuthAccessToken{} } +func (*OAuthAccessToken) ProtoMessage() {} +func (*OAuthAccessToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{1} +} +func (m *OAuthAccessToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAccessToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAccessToken.Merge(m, src) +} +func (m *OAuthAccessToken) XXX_Size() int { + return m.Size() +} +func (m *OAuthAccessToken) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAccessToken.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAccessToken proto.InternalMessageInfo + +func (m *OAuthAccessTokenList) Reset() { *m = OAuthAccessTokenList{} } +func (*OAuthAccessTokenList) ProtoMessage() {} +func (*OAuthAccessTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{2} +} +func (m *OAuthAccessTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAccessTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAccessTokenList.Merge(m, src) +} +func (m *OAuthAccessTokenList) XXX_Size() int { + return m.Size() +} +func (m *OAuthAccessTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAccessTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAccessTokenList proto.InternalMessageInfo + +func (m *OAuthAuthorizeToken) Reset() { *m = OAuthAuthorizeToken{} } +func (*OAuthAuthorizeToken) ProtoMessage() {} +func (*OAuthAuthorizeToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{3} +} +func (m *OAuthAuthorizeToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAuthorizeToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAuthorizeToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAuthorizeToken.Merge(m, src) +} +func (m *OAuthAuthorizeToken) XXX_Size() int { + 
return m.Size() +} +func (m *OAuthAuthorizeToken) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAuthorizeToken.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAuthorizeToken proto.InternalMessageInfo + +func (m *OAuthAuthorizeTokenList) Reset() { *m = OAuthAuthorizeTokenList{} } +func (*OAuthAuthorizeTokenList) ProtoMessage() {} +func (*OAuthAuthorizeTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{4} +} +func (m *OAuthAuthorizeTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAuthorizeTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAuthorizeTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAuthorizeTokenList.Merge(m, src) +} +func (m *OAuthAuthorizeTokenList) XXX_Size() int { + return m.Size() +} +func (m *OAuthAuthorizeTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAuthorizeTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAuthorizeTokenList proto.InternalMessageInfo + +func (m *OAuthClient) Reset() { *m = OAuthClient{} } +func (*OAuthClient) ProtoMessage() {} +func (*OAuthClient) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{5} +} +func (m *OAuthClient) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClient) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClient.Merge(m, src) +} +func (m *OAuthClient) XXX_Size() int { + return m.Size() +} +func (m *OAuthClient) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClient.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClient proto.InternalMessageInfo + +func (m *OAuthClientAuthorization) Reset() { *m = OAuthClientAuthorization{} } +func (*OAuthClientAuthorization) ProtoMessage() {} +func (*OAuthClientAuthorization) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{6} +} +func (m *OAuthClientAuthorization) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientAuthorization) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientAuthorization.Merge(m, src) +} +func (m *OAuthClientAuthorization) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientAuthorization) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientAuthorization.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientAuthorization proto.InternalMessageInfo + +func (m *OAuthClientAuthorizationList) Reset() { *m = OAuthClientAuthorizationList{} } +func (*OAuthClientAuthorizationList) ProtoMessage() {} +func (*OAuthClientAuthorizationList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{7} +} +func (m *OAuthClientAuthorizationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientAuthorizationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientAuthorizationList) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_OAuthClientAuthorizationList.Merge(m, src) +} +func (m *OAuthClientAuthorizationList) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientAuthorizationList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientAuthorizationList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientAuthorizationList proto.InternalMessageInfo + +func (m *OAuthClientList) Reset() { *m = OAuthClientList{} } +func (*OAuthClientList) ProtoMessage() {} +func (*OAuthClientList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{8} +} +func (m *OAuthClientList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientList.Merge(m, src) +} +func (m *OAuthClientList) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientList proto.InternalMessageInfo + +func (m *OAuthRedirectReference) Reset() { *m = OAuthRedirectReference{} } +func (*OAuthRedirectReference) ProtoMessage() {} +func (*OAuthRedirectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{9} +} +func (m *OAuthRedirectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthRedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthRedirectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthRedirectReference.Merge(m, src) +} +func (m *OAuthRedirectReference) XXX_Size() int { + return m.Size() +} +func (m *OAuthRedirectReference) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthRedirectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthRedirectReference proto.InternalMessageInfo + +func (m *RedirectReference) Reset() { *m = RedirectReference{} } +func (*RedirectReference) ProtoMessage() {} +func (*RedirectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{10} +} +func (m *RedirectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RedirectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedirectReference.Merge(m, src) +} +func (m *RedirectReference) XXX_Size() int { + return m.Size() +} +func (m *RedirectReference) XXX_DiscardUnknown() { + xxx_messageInfo_RedirectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_RedirectReference proto.InternalMessageInfo + +func (m *ScopeRestriction) Reset() { *m = ScopeRestriction{} } +func (*ScopeRestriction) ProtoMessage() {} +func (*ScopeRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{11} +} +func (m *ScopeRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func 
(m *ScopeRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeRestriction.Merge(m, src) +} +func (m *ScopeRestriction) XXX_Size() int { + return m.Size() +} +func (m *ScopeRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeRestriction proto.InternalMessageInfo + +func (m *UserOAuthAccessToken) Reset() { *m = UserOAuthAccessToken{} } +func (*UserOAuthAccessToken) ProtoMessage() {} +func (*UserOAuthAccessToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{12} +} +func (m *UserOAuthAccessToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserOAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserOAuthAccessToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserOAuthAccessToken.Merge(m, src) +} +func (m *UserOAuthAccessToken) XXX_Size() int { + return m.Size() +} +func (m *UserOAuthAccessToken) XXX_DiscardUnknown() { + xxx_messageInfo_UserOAuthAccessToken.DiscardUnknown(m) +} + +var xxx_messageInfo_UserOAuthAccessToken proto.InternalMessageInfo + +func (m *UserOAuthAccessTokenList) Reset() { *m = UserOAuthAccessTokenList{} } +func (*UserOAuthAccessTokenList) ProtoMessage() {} +func (*UserOAuthAccessTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{13} +} +func (m *UserOAuthAccessTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserOAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserOAuthAccessTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserOAuthAccessTokenList.Merge(m, src) +} +func (m *UserOAuthAccessTokenList) XXX_Size() int { + return m.Size() +} +func (m *UserOAuthAccessTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_UserOAuthAccessTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_UserOAuthAccessTokenList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterRoleScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ClusterRoleScopeRestriction") + proto.RegisterType((*OAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessToken") + proto.RegisterType((*OAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessTokenList") + proto.RegisterType((*OAuthAuthorizeToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeToken") + proto.RegisterType((*OAuthAuthorizeTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeTokenList") + proto.RegisterType((*OAuthClient)(nil), "github.com.openshift.api.oauth.v1.OAuthClient") + proto.RegisterType((*OAuthClientAuthorization)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorization") + proto.RegisterType((*OAuthClientAuthorizationList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorizationList") + proto.RegisterType((*OAuthClientList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientList") + proto.RegisterType((*OAuthRedirectReference)(nil), "github.com.openshift.api.oauth.v1.OAuthRedirectReference") + proto.RegisterType((*RedirectReference)(nil), "github.com.openshift.api.oauth.v1.RedirectReference") + proto.RegisterType((*ScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ScopeRestriction") + 
proto.RegisterType((*UserOAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessToken") + proto.RegisterType((*UserOAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessTokenList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/oauth/v1/generated.proto", fileDescriptor_bd688dca7ea39c8a) +} + +var fileDescriptor_bd688dca7ea39c8a = []byte{ + // 1272 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0x36, 0x76, 0x62, 0x3f, 0x37, 0xbf, 0x26, 0x4d, 0xbb, 0xdf, 0xb6, 0x5f, 0xdb, 0x75, + 0x24, 0x1a, 0x04, 0xac, 0x49, 0x28, 0xa5, 0x52, 0xa5, 0x4a, 0x76, 0xa8, 0x4a, 0x04, 0x69, 0xa5, + 0x49, 0x03, 0x15, 0xf4, 0xd0, 0xe9, 0xee, 0x8b, 0x3d, 0x64, 0xbd, 0xbb, 0xec, 0x8c, 0x43, 0x83, + 0x7a, 0xe0, 0xc2, 0x9d, 0x7f, 0x84, 0x0b, 0x77, 0x0e, 0x48, 0x1c, 0x7a, 0x42, 0x3d, 0x20, 0xd4, + 0x93, 0x45, 0x8c, 0x38, 0xf0, 0x2f, 0x70, 0x42, 0x3b, 0xbb, 0xde, 0x1f, 0x8e, 0x4d, 0xdc, 0x03, + 0x11, 0x87, 0xde, 0xbc, 0xef, 0x7d, 0x3e, 0x6f, 0xde, 0xcc, 0xbc, 0xcf, 0x9b, 0x19, 0xc3, 0x7a, + 0x8b, 0xcb, 0x76, 0xf7, 0xb1, 0x61, 0xba, 0x9d, 0xba, 0xeb, 0xa1, 0x23, 0xda, 0x7c, 0x4f, 0xd6, + 0x99, 0xc7, 0xeb, 0x2e, 0xeb, 0xca, 0x76, 0xfd, 0x60, 0xbd, 0xde, 0x42, 0x07, 0x7d, 0x26, 0xd1, + 0x32, 0x3c, 0xdf, 0x95, 0x2e, 0xb9, 0x92, 0x50, 0x8c, 0x98, 0x62, 0x30, 0x8f, 0x1b, 0x8a, 0x62, + 0x1c, 0xac, 0x5f, 0x7c, 0x2b, 0x15, 0xb5, 0xe5, 0xb6, 0xdc, 0xba, 0x62, 0x3e, 0xee, 0xee, 0xa9, + 0x2f, 0xf5, 0xa1, 0x7e, 0x85, 0x11, 0x2f, 0x5e, 0xdb, 0xbf, 0x21, 0x0c, 0xee, 0x06, 0xc3, 0x76, + 0x98, 0xd9, 0xe6, 0x0e, 0xfa, 0x87, 0x75, 0x6f, 0xbf, 0x15, 0x18, 0x44, 0xbd, 0x83, 0x92, 0x8d, + 0xc8, 0xe3, 0xe2, 0xf5, 0x71, 0x2c, 0xbf, 0xeb, 0x48, 0xde, 0xc1, 0xba, 0x30, 0xdb, 0xd8, 0x61, + 0xc3, 0xbc, 0xda, 0x0f, 0x1a, 0x5c, 0xda, 0xb4, 0xbb, 0x42, 0xa2, 0x4f, 0x5d, 0x1b, 0x77, 0x4c, + 0xd7, 0x43, 0x8a, 0x42, 0xfa, 0xdc, 0x94, 0xdc, 0x75, 0xc8, 0x1b, 0x50, 0xf4, 0x5d, 0x1b, 0xef, + 0xb2, 0x0e, 0x0a, 0x5d, 0xab, 0x4e, 0xaf, 0x15, 0x9b, 0x73, 0xfd, 0x5e, 0xa5, 0x48, 0x07, 0x46, + 0x9a, 0xf8, 0x89, 0x01, 0xe0, 0x04, 0x3f, 0x3c, 0x66, 0xa2, 0xd0, 0xcf, 0x28, 0xf4, 0x7c, 0xbf, + 0x57, 0x81, 0xbb, 0xb1, 0x95, 0xa6, 0x10, 0xa4, 0x01, 0x0b, 0xcc, 0xb6, 0xdd, 0x2f, 0x6f, 0x0b, + 0x93, 0xd9, 0x2c, 0x18, 0x4f, 0x9f, 0xae, 0x6a, 0x6b, 0x85, 0xe6, 0x85, 0x67, 0xbd, 0xca, 0x54, + 0xbf, 0x57, 0x59, 0x68, 0x64, 0xdd, 0x74, 0x18, 0x5f, 0xfb, 0x23, 0x07, 0x8b, 0xf7, 0x1a, 0x5d, + 0xd9, 0x6e, 0x98, 0x26, 0x0a, 0x71, 0xdf, 0xdd, 0x47, 0x87, 0x3c, 0x82, 0x42, 0xb0, 0x4e, 0x16, + 0x93, 0x4c, 0xd7, 0xaa, 0xda, 0x5a, 0x69, 0xe3, 0x6d, 0x23, 0x5c, 0x1f, 0x23, 0xbd, 0x3e, 0x86, + 0xb7, 0xdf, 0x0a, 0x0c, 0xc2, 0x08, 0xd0, 0xc6, 0xc1, 0xba, 0x71, 0xef, 0xf1, 0xe7, 0x68, 0xca, + 0x6d, 0x94, 0xac, 0x49, 0xa2, 0x14, 0x20, 0xb1, 0xd1, 0x38, 0x2a, 0xd9, 0x00, 0x30, 0x6d, 0x8e, + 0x8e, 0x0c, 0x66, 0xa6, 0x9f, 0xa9, 0x6a, 0x6b, 0xc5, 0x84, 0xb1, 0x19, 0x7b, 0x68, 0x0a, 0x45, + 0xea, 0x50, 0xc4, 0x27, 0x1e, 0xf7, 0x51, 0x6c, 0x85, 0xf3, 0x9c, 0x6e, 0x2e, 0x45, 0x94, 0xe2, + 0xed, 0x81, 0x83, 0x26, 0x18, 0x52, 0x83, 0x19, 0x11, 0xec, 0x87, 0xd0, 0x73, 0x6a, 0x29, 0xa1, + 0xdf, 0xab, 0xcc, 0xa8, 0x1d, 0x12, 0x34, 0xf2, 0x90, 0x77, 0xa1, 0xe4, 0xa3, 0xc5, 0x7d, 0x34, + 0xe5, 0x2e, 0xdd, 0xd2, 0xf3, 0x2a, 0x93, 0xe5, 0x28, 0x6c, 0x89, 0x26, 0x2e, 0x9a, 0xc6, 0x91, + 0x37, 0xa1, 0xd0, 0x15, 0xe8, 0xab, 0xec, 0x67, 0x14, 0x67, 0x31, 0xe2, 0x14, 0x76, 0x23, 0x3b, + 0x8d, 0x11, 0xe4, 0x75, 0x98, 0x0d, 0x7e, 0xef, 0x6e, 0xbd, 
0xaf, 0xcf, 0x2a, 0xf0, 0x42, 0x04, + 0x9e, 0xdd, 0x0d, 0xcd, 0x74, 0xe0, 0x27, 0xb7, 0x60, 0x3e, 0xa8, 0x7b, 0xd7, 0xe7, 0x5f, 0xa1, + 0xda, 0x0c, 0xbd, 0xa0, 0x18, 0xe7, 0x23, 0xc6, 0x7c, 0x23, 0xe3, 0xa5, 0x43, 0x68, 0x72, 0x03, + 0xce, 0xfa, 0xb8, 0xe7, 0xa3, 0x68, 0x87, 0xec, 0xa2, 0x62, 0x9f, 0x8b, 0xd8, 0x67, 0x69, 0xca, + 0x47, 0x33, 0x48, 0xf2, 0x10, 0x74, 0xee, 0x30, 0x53, 0xf2, 0x03, 0x2e, 0x0f, 0xef, 0xf3, 0x0e, + 0xba, 0x5d, 0xb9, 0x83, 0xa6, 0xeb, 0x58, 0x42, 0x87, 0xaa, 0xb6, 0x96, 0x6f, 0x56, 0xa3, 0x28, + 0xfa, 0xd6, 0x18, 0x1c, 0x1d, 0x1b, 0xa1, 0xf6, 0xb3, 0x06, 0xe7, 0x86, 0xeb, 0xec, 0x23, 0x2e, + 0x24, 0x79, 0x78, 0xac, 0xd6, 0x8c, 0xc9, 0x6a, 0x2d, 0x60, 0xab, 0x4a, 0x8b, 0x57, 0x7e, 0x60, + 0x49, 0xd5, 0xd9, 0x03, 0xc8, 0x73, 0x89, 0x9d, 0x50, 0x4c, 0xa5, 0x8d, 0x77, 0x8c, 0x13, 0xdb, + 0x8d, 0x31, 0x9c, 0x65, 0x73, 0x2e, 0x8a, 0x9f, 0xdf, 0x0a, 0x22, 0xd1, 0x30, 0x60, 0xed, 0xc7, + 0x1c, 0x2c, 0x87, 0xd0, 0xec, 0x06, 0xbc, 0xd2, 0xce, 0x49, 0xda, 0x59, 0x85, 0xbc, 0x90, 0x4c, + 0x0e, 0x84, 0x13, 0x2f, 0xef, 0x4e, 0x60, 0xa4, 0xa1, 0x2f, 0x23, 0xb0, 0xd9, 0x97, 0x11, 0x58, + 0xe1, 0x04, 0x81, 0xdd, 0x84, 0x39, 0xd3, 0xb5, 0x70, 0xb3, 0xcd, 0x6c, 0x1b, 0x9d, 0x16, 0x46, + 0x0a, 0x59, 0x89, 0x08, 0x73, 0x9b, 0x69, 0x27, 0xcd, 0x62, 0xc9, 0x36, 0x2c, 0x67, 0x0c, 0xdb, + 0x28, 0xdb, 0xae, 0xa5, 0xe4, 0x51, 0x6c, 0x5e, 0x8a, 0x42, 0x2c, 0x6f, 0x1e, 0x87, 0xd0, 0x51, + 0xbc, 0xda, 0x2f, 0x1a, 0x5c, 0x18, 0x51, 0x43, 0xa7, 0xa0, 0x8b, 0xcf, 0xb2, 0xba, 0xb8, 0x3e, + 0xb1, 0x2e, 0x32, 0x89, 0x8e, 0x91, 0xc6, 0x37, 0x33, 0x50, 0x52, 0xe8, 0xb0, 0x18, 0x4f, 0x41, + 0x12, 0xaf, 0xc1, 0x8c, 0x40, 0xd3, 0x47, 0x19, 0xc9, 0x61, 0x3e, 0x42, 0xcf, 0xec, 0x28, 0x2b, + 0x8d, 0xbc, 0x64, 0x13, 0x96, 0x98, 0x65, 0xf1, 0xe0, 0xe4, 0x63, 0x76, 0xe8, 0x13, 0xfa, 0xb4, + 0x2a, 0xf0, 0x95, 0x7e, 0xaf, 0xb2, 0xd4, 0x18, 0x76, 0xd2, 0xe3, 0x78, 0xb2, 0x03, 0x2b, 0x3e, + 0x0a, 0xcf, 0x75, 0xac, 0x4f, 0xb8, 0x6c, 0xc7, 0x7b, 0x1a, 0x28, 0x25, 0x38, 0x7b, 0xff, 0x1f, + 0x8d, 0xbd, 0x42, 0x47, 0x81, 0xe8, 0x68, 0x2e, 0xb9, 0x16, 0xf4, 0xed, 0x58, 0x23, 0x42, 0xcf, + 0xab, 0xa4, 0x16, 0xc3, 0x9e, 0x9d, 0xd8, 0x69, 0x06, 0x45, 0xb6, 0xa0, 0xd4, 0xf2, 0x99, 0x23, + 0xa3, 0x3a, 0x0c, 0x05, 0x75, 0x75, 0xa0, 0xc0, 0x3b, 0x89, 0xeb, 0xaf, 0x5e, 0x65, 0x51, 0x7d, + 0x7e, 0xc0, 0x1c, 0xcb, 0x46, 0xff, 0xfe, 0xa1, 0x87, 0x34, 0xcd, 0x25, 0x4f, 0x61, 0x49, 0x0c, + 0x5d, 0x5e, 0x84, 0x3e, 0x3b, 0x71, 0xd7, 0x1c, 0xbe, 0xf8, 0x34, 0xff, 0x17, 0x65, 0xb1, 0x34, + 0xec, 0x11, 0xf4, 0xf8, 0x40, 0xe4, 0x01, 0xe8, 0x2c, 0x69, 0xb9, 0xdb, 0xec, 0x49, 0xa3, 0x85, + 0x83, 0xc3, 0xa7, 0xa0, 0x0e, 0x9f, 0xcb, 0xc1, 0xc1, 0xd3, 0x18, 0x83, 0xa1, 0x63, 0xd9, 0xe4, + 0x10, 0x56, 0x53, 0xbe, 0x71, 0x27, 0x97, 0xea, 0x02, 0xf9, 0xe6, 0xd5, 0x7e, 0xaf, 0xb2, 0xda, + 0x38, 0x19, 0x4e, 0x27, 0x89, 0x59, 0xfb, 0xee, 0x0c, 0xe8, 0x29, 0x1d, 0x0c, 0xb4, 0xa3, 0x2e, + 0x5e, 0xff, 0xd1, 0x73, 0x22, 0xdd, 0x76, 0xa7, 0x5f, 0xa6, 0xed, 0xe6, 0x4e, 0x68, 0xbb, 0xc9, + 0x79, 0x92, 0x1f, 0x77, 0x9e, 0xd4, 0x7a, 0x1a, 0x5c, 0x1e, 0xb7, 0x5e, 0xa7, 0xd0, 0x13, 0x1f, + 0x65, 0x7b, 0xe2, 0xcd, 0x49, 0x7b, 0xe2, 0x88, 0x6c, 0xc7, 0x34, 0xc6, 0x9f, 0x34, 0x58, 0x48, + 0x51, 0x4e, 0x61, 0x4e, 0x3b, 0xd9, 0x39, 0x19, 0x2f, 0x37, 0xa7, 0x31, 0xd3, 0x38, 0xd2, 0xe0, + 0xbc, 0x42, 0x0d, 0x3a, 0x13, 0xc5, 0x3d, 0xf4, 0xd1, 0x31, 0xf1, 0x14, 0xaa, 0x1a, 0xa1, 0xe8, + 0x0f, 0x86, 0x53, 0x45, 0x5d, 0xda, 0xb8, 0x36, 0xc1, 0xac, 0x8e, 0xa5, 0x9a, 0xdc, 0x7f, 0x62, + 0x13, 0x4d, 0x22, 0xd7, 0x9e, 0xc2, 0xd2, 0xf1, 0xd9, 0xad, 0x42, 0xbe, 0xe5, 0xbb, 
0x5d, 0x4f, + 0x4d, 0x2d, 0x75, 0x73, 0xb9, 0x13, 0x18, 0x69, 0xe8, 0x23, 0x55, 0xc8, 0xed, 0x73, 0xc7, 0x8a, + 0x04, 0x77, 0x36, 0xc2, 0xe4, 0x3e, 0xe4, 0x8e, 0x45, 0x95, 0x27, 0x40, 0x38, 0x89, 0xc0, 0x62, + 0x84, 0x12, 0x97, 0xf2, 0xd4, 0xbe, 0xd7, 0x60, 0x71, 0xc4, 0x53, 0xb2, 0x60, 0x73, 0x89, 0x3e, + 0xb3, 0x07, 0x2f, 0xc9, 0x85, 0xa0, 0xcb, 0xdf, 0x7e, 0xc2, 0x4c, 0xf9, 0x31, 0xb3, 0xbb, 0x28, + 0x68, 0x0c, 0x20, 0x5f, 0x40, 0xc9, 0x4c, 0x9e, 0xa5, 0xd1, 0x42, 0xdd, 0x9a, 0x60, 0xa1, 0xfe, + 0xe1, 0x31, 0x1b, 0x8e, 0x97, 0x02, 0xd0, 0xf4, 0x18, 0xb5, 0x3f, 0x73, 0x70, 0x2e, 0xd0, 0xfd, + 0xab, 0xe7, 0xe4, 0xab, 0xe7, 0xe4, 0xbf, 0xfd, 0x9c, 0xfc, 0x55, 0x03, 0x7d, 0x54, 0xad, 0x9d, + 0x42, 0x4b, 0x7d, 0x98, 0x6d, 0xa9, 0xef, 0x4d, 0xa0, 0xa9, 0x51, 0x99, 0x8e, 0xee, 0xad, 0xcd, + 0x3b, 0xcf, 0x8e, 0xca, 0x53, 0xcf, 0x8f, 0xca, 0x53, 0x2f, 0x8e, 0xca, 0x53, 0x5f, 0xf7, 0xcb, + 0xda, 0xb3, 0x7e, 0x59, 0x7b, 0xde, 0x2f, 0x6b, 0x2f, 0xfa, 0x65, 0xed, 0xb7, 0x7e, 0x59, 0xfb, + 0xf6, 0xf7, 0xf2, 0xd4, 0xa7, 0x57, 0x4e, 0xfc, 0xa3, 0xed, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xc6, 0xcf, 0x36, 0xd6, 0x8c, 0x13, 0x00, 0x00, +} + +func (m *ClusterRoleScopeRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleScopeRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.AllowEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.RoleNames) > 0 { + for iNdEx := len(m.RoleNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RoleNames[iNdEx]) + copy(dAtA[i:], m.RoleNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OAuthAccessToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAccessToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x50 + i -= len(m.RefreshToken) + copy(dAtA[i:], m.RefreshToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken))) + i-- + dAtA[i] = 0x4a + i -= len(m.AuthorizeToken) + copy(dAtA[i:], m.AuthorizeToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAccessTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAuthorizeToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAuthorizeToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAuthorizeToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CodeChallengeMethod) + copy(dAtA[i:], m.CodeChallengeMethod) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallengeMethod))) + i-- + dAtA[i] = 0x52 + i -= len(m.CodeChallenge) + copy(dAtA[i:], m.CodeChallenge) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallenge))) + i-- + dAtA[i] = 0x4a + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x3a + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAuthorizeTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAuthorizeTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAuthorizeTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClient) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClient) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AccessTokenInactivityTimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenInactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x48 + } + if m.AccessTokenMaxAgeSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenMaxAgeSeconds)) + i-- + dAtA[i] = 0x40 + } + if len(m.ScopeRestrictions) > 0 { + for iNdEx := len(m.ScopeRestrictions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeRestrictions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.GrantMethod) + copy(dAtA[i:], m.GrantMethod) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GrantMethod))) + i-- + dAtA[i] = 0x32 + if len(m.RedirectURIs) > 0 { + for iNdEx := len(m.RedirectURIs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RedirectURIs[iNdEx]) + copy(dAtA[i:], m.RedirectURIs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURIs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.RespondWithChallenges { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.AdditionalSecrets) > 0 { + for iNdEx := len(m.AdditionalSecrets) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdditionalSecrets[iNdEx]) + copy(dAtA[i:], m.AdditionalSecrets[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalSecrets[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*OAuthClientAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientAuthorization) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x22 + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientAuthorizationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientAuthorizationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientAuthorizationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthRedirectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthRedirectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthRedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RedirectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClusterRole != nil { + { + size, err := m.ClusterRole.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ExactValues) > 0 { + for iNdEx := len(m.ExactValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExactValues[iNdEx]) + copy(dAtA[i:], m.ExactValues[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExactValues[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UserOAuthAccessToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserOAuthAccessToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x50 + i -= len(m.RefreshToken) + copy(dAtA[i:], m.RefreshToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken))) + i-- + dAtA[i] = 0x4a + i -= len(m.AuthorizeToken) + copy(dAtA[i:], m.AuthorizeToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken))) + i-- + dAtA[i] = 0x42 + i -= 
len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserOAuthAccessTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserOAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterRoleScopeRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RoleNames) > 0 { + for _, s := range m.RoleNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *OAuthAccessToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorizeToken) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RefreshToken) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds)) + return n +} + +func (m *OAuthAccessTokenList) Size() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthAuthorizeToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CodeChallenge) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CodeChallengeMethod) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OAuthAuthorizeTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClient) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AdditionalSecrets) > 0 { + for _, s := range m.AdditionalSecrets { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.RedirectURIs) > 0 { + for _, s := range m.RedirectURIs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.GrantMethod) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ScopeRestrictions) > 0 { + for _, e := range m.ScopeRestrictions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AccessTokenMaxAgeSeconds != nil { + n += 1 + sovGenerated(uint64(*m.AccessTokenMaxAgeSeconds)) + } + if m.AccessTokenInactivityTimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.AccessTokenInactivityTimeoutSeconds)) + } + return n +} + +func (m *OAuthClientAuthorization) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClientAuthorizationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClientList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthRedirectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Reference.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + return n +} + +func (m *RedirectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScopeRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExactValues) > 0 { + for _, s := range m.ExactValues { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClusterRole != nil { + l = m.ClusterRole.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserOAuthAccessToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorizeToken) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RefreshToken) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds)) + return n +} + +func (m *UserOAuthAccessTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRoleScopeRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleScopeRestriction{`, + `RoleNames:` + fmt.Sprintf("%v", this.RoleNames) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `AllowEscalation:` + fmt.Sprintf("%v", this.AllowEscalation) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAccessToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthAccessToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`, + `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`, + `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAccessTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthAccessToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAccessToken", "OAuthAccessToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&OAuthAccessTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAuthorizeToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthAuthorizeToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `CodeChallenge:` + fmt.Sprintf("%v", this.CodeChallenge) + `,`, + `CodeChallengeMethod:` + fmt.Sprintf("%v", this.CodeChallengeMethod) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAuthorizeTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthAuthorizeToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAuthorizeToken", "OAuthAuthorizeToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthAuthorizeTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClient) String() string { + if this == nil { + return "nil" + } + repeatedStringForScopeRestrictions := "[]ScopeRestriction{" + for _, f := range this.ScopeRestrictions { + repeatedStringForScopeRestrictions += strings.Replace(strings.Replace(f.String(), "ScopeRestriction", "ScopeRestriction", 1), `&`, ``, 1) + "," + } + repeatedStringForScopeRestrictions += "}" + s := strings.Join([]string{`&OAuthClient{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AdditionalSecrets:` + fmt.Sprintf("%v", this.AdditionalSecrets) + `,`, + `RespondWithChallenges:` + fmt.Sprintf("%v", this.RespondWithChallenges) + `,`, + `RedirectURIs:` + fmt.Sprintf("%v", this.RedirectURIs) + `,`, + `GrantMethod:` + fmt.Sprintf("%v", this.GrantMethod) + `,`, + `ScopeRestrictions:` + repeatedStringForScopeRestrictions + `,`, + `AccessTokenMaxAgeSeconds:` + valueToStringGenerated(this.AccessTokenMaxAgeSeconds) + `,`, + `AccessTokenInactivityTimeoutSeconds:` + valueToStringGenerated(this.AccessTokenInactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthClientAuthorization{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientAuthorizationList) String() string { + if this == nil 
{ + return "nil" + } + repeatedStringForItems := "[]OAuthClientAuthorization{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClientAuthorization", "OAuthClientAuthorization", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthClientAuthorizationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthClient{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClient", "OAuthClient", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthClientList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthRedirectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthRedirectReference{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "RedirectReference", "RedirectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RedirectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RedirectReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopeRestriction{`, + `ExactValues:` + fmt.Sprintf("%v", this.ExactValues) + `,`, + `ClusterRole:` + strings.Replace(this.ClusterRole.String(), "ClusterRoleScopeRestriction", "ClusterRoleScopeRestriction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserOAuthAccessToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserOAuthAccessToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`, + `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`, + `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *UserOAuthAccessTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]UserOAuthAccessToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "UserOAuthAccessToken", "UserOAuthAccessToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&UserOAuthAccessTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRoleScopeRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleScopeRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoleNames = append(m.RoleNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEscalation = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAccessToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAccessToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorizeToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefreshToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType) + } + m.InactivityTimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAccessTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAccessTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthAccessToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAuthorizeToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAuthorizeToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAuthorizeToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeChallenge", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CodeChallenge = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeChallengeMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CodeChallengeMethod = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAuthorizeTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAuthorizeTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAuthorizeTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthAuthorizeToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClient) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClient: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClient: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalSecrets", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalSecrets = append(m.AdditionalSecrets, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RespondWithChallenges", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RespondWithChallenges = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURIs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURIs = append(m.RedirectURIs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GrantMethod = GrantHandlerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeRestrictions = append(m.ScopeRestrictions, ScopeRestriction{}) + if err := m.ScopeRestrictions[len(m.ScopeRestrictions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenMaxAgeSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AccessTokenMaxAgeSeconds = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenInactivityTimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AccessTokenInactivityTimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
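+	// Editorial note (not generator output): ClientName is field 2 with wire
+	// type 2 (length-delimited), so its key byte is (2<<3)|2 = 0x12. Assuming
+	// the hypothetical value "abc", the encoded field on the wire would be:
+	//   0x12 0x03 0x61 0x62 0x63   (tag, length varint, UTF-8 bytes)
+	// The string body is the intStringLen bytes starting at iNdEx, copied out
+	// by the string() conversion a few statements below.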
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientAuthorizationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientAuthorizationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientAuthorizationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
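+	// Editorial note (not generator output): this loop decodes a protobuf
+	// base-128 varint. Each byte contributes its low 7 bits, least-significant
+	// group first; a set high bit (0x80) means another byte follows. The
+	// `shift >= 64` guard rejects varints longer than 10 bytes, which would
+	// otherwise overflow a 64-bit value. A worked example, assuming the input
+	// bytes {0x96, 0x01}:
+	//   b=0x96: msglen |= 0x16 << 0   (high bit set, keep reading)
+	//   b=0x01: msglen |= 0x01 << 7   -> msglen == 150 (high bit clear, stop)
+	// sovGenerated, defined earlier in this file, is the inverse calculation:
+	// it returns the encoded width, (bits(x|1)+6)/7 bytes, which the Size()
+	// methods use to pre-compute marshaling buffer sizes.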
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthClientAuthorization{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = 
append(m.Items, OAuthClient{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthRedirectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthRedirectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthRedirectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RedirectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RedirectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedirectReference: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExactValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
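+	// Editorial note (not generator output): ExactValues is a repeated string
+	// field, so each occurrence of tag 1 on the wire appends one element in
+	// order; there is no length prefix for the slice as a whole. The
+	// `postIndex < 0` check that follows guards against iNdEx+intStringLen
+	// wrapping negative on integer overflow, complementing the `postIndex > l`
+	// bounds check against truncated input.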
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExactValues = append(m.ExactValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterRole == nil { + m.ClusterRole = &ClusterRoleScopeRestriction{} + } + if err := m.ClusterRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserOAuthAccessToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserOAuthAccessToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserOAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
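+	// Editorial note (not generator output): fields 1-10 of UserOAuthAccessToken
+	// mirror OAuthAccessToken's field numbering, so this decoder is structurally
+	// identical to the one above. Any field number without a matching case falls
+	// through to the default branch, where skipGenerated advances past the
+	// unknown field by wire type; this is how older decoders tolerate messages
+	// produced by newer schemas.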
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorizeToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefreshToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType) + } + m.InactivityTimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserOAuthAccessTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserOAuthAccessTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserOAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
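// msglen now holds the varint-decoded byte length of the next embedded + // UserOAuthAccessToken element; after the bounds checks below, a zero value is + // appended to m.Items and unmarshaled in place from dAtA[iNdEx:postIndex]. + 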
if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, UserOAuthAccessToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.proto b/vendor/github.com/openshift/api/oauth/v1/generated.proto new file mode 100644 index 0000000000000..4a5474e0c64d9 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/generated.proto @@ -0,0 +1,321 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.oauth.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/oauth/v1"; + +// ClusterRoleScopeRestriction describes restrictions on cluster role scopes +message ClusterRoleScopeRestriction { + // roleNames is the list of cluster roles that can be referenced. * means anything + repeated string roleNames = 1; + + // namespaces is the list of namespaces that can be referenced. 
* means any of them (including *) + repeated string namespaces = 2; + + // allowEscalation indicates whether you can request roles and their escalating resources + optional bool allowEscalation = 3; +} + +// OAuthAccessToken describes an OAuth access token. +// The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at +// least 32 characters long. +// +// The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded +// base64-encoding (as described in RFC4648) on the hashed result. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAccessToken { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // clientName references the client that created this token. + optional string clientName = 2; + + // expiresIn is the seconds from CreationTime before this token expires. + optional int64 expiresIn = 3; + + // scopes is an array of the requested scopes. + repeated string scopes = 4; + + // redirectURI is the redirection associated with the token. + optional string redirectURI = 5; + + // userName is the user name associated with this token + optional string userName = 6; + + // userUID is the unique UID associated with this token + optional string userUID = 7; + + // authorizeToken contains the token that authorized this token + optional string authorizeToken = 8; + + // refreshToken is the value by which this token can be renewed. Can be blank. + optional string refreshToken = 9; + + // inactivityTimeoutSeconds is the value in seconds, from the + // CreationTimestamp, after which this token can no longer be used. + // The value is automatically incremented when the token is used. + optional int32 inactivityTimeoutSeconds = 10; +} + +// OAuthAccessTokenList is a collection of OAuth access tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAccessTokenList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of OAuth access tokens + repeated OAuthAccessToken items = 2; +} + +// OAuthAuthorizeToken describes an OAuth authorization token +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAuthorizeToken { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // clientName references the client that created this token. + optional string clientName = 2; + + // expiresIn is the seconds from CreationTime before this token expires. + optional int64 expiresIn = 3; + + // scopes is an array of the requested scopes. + repeated string scopes = 4; + + // redirectURI is the redirection associated with the token. 
+ optional string redirectURI = 5; + + // state data from request + optional string state = 6; + + // userName is the user name associated with this token + optional string userName = 7; + + // userUID is the unique UID associated with this token. UserUID and UserName must both match + // for this token to be valid. + optional string userUID = 8; + + // codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + optional string codeChallenge = 9; + + // codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + optional string codeChallengeMethod = 10; +} + +// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAuthorizeTokenList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of OAuth authorization tokens + repeated OAuthAuthorizeToken items = 2; +} + +// OAuthClient describes an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClient { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // secret is the unique secret associated with a client + optional string secret = 2; + + // additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // and for service account token validation + repeated string additionalSecrets = 3; + + // respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + optional bool respondWithChallenges = 4; + + // redirectURIs is the valid redirection URIs associated with a client + // +patchStrategy=merge + repeated string redirectURIs = 5; + + // grantMethod is a required field which determines how to handle grants for this client. + // Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + optional string grantMethod = 6; + + // scopeRestrictions describes which scopes this client can request. Each requested scope + // is checked against each restriction. If any restriction matches, then the scope is allowed. + // If no restriction matches, then the scope is denied. + repeated ScopeRestriction scopeRestrictions = 7; + + // accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. + // 0 means no expiration. + optional int32 accessTokenMaxAgeSeconds = 8; + + // accessTokenInactivityTimeoutSeconds overrides the default token + // inactivity timeout for tokens granted to this client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. 
The user will need to acquire a new + // token to regain access once a token times out. + // This value needs to be set only if the default set in configuration is + // not appropriate for this client. Valid values are: + // - 0: Tokens for this client never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + optional int32 accessTokenInactivityTimeoutSeconds = 9; +} + +// OAuthClientAuthorization describes an authorization created by an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClientAuthorization { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // clientName references the client that created this authorization + optional string clientName = 2; + + // userName is the user name that authorized this client + optional string userName = 3; + + // userUID is the unique UID associated with this authorization. UserUID and UserName + // must both match for this authorization to be valid. + optional string userUID = 4; + + // scopes is an array of the granted scopes. + repeated string scopes = 5; +} + +// OAuthClientAuthorizationList is a collection of OAuth client authorizations +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClientAuthorizationList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of OAuth client authorizations + repeated OAuthClientAuthorization items = 2; +} + +// OAuthClientList is a collection of OAuth clients +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClientList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of OAuth clients + repeated OAuthClient items = 2; +} + +// OAuthRedirectReference is a reference to an OAuth redirect object. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthRedirectReference { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The reference to a redirect object in the current namespace. + optional RedirectReference reference = 2; +} + +// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed. 
+message RedirectReference { + // The group of the target that is being referred to. + optional string group = 1; + + // The kind of the target that is being referred to. Currently, only 'Route' is allowed. + optional string kind = 2; + + // The name of the target that is being referred to. e.g. name of the Route. + optional string name = 3; +} + +// ScopeRestriction describes one restriction on scopes. Exactly one option must be non-nil. +message ScopeRestriction { + // ExactValues means the scope has to match a particular set of strings exactly + repeated string literals = 1; + + // clusterRole describes a set of restrictions for cluster role scoping. + optional ClusterRoleScopeRestriction clusterRole = 2; +} + +// UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to +// the user the access token was issued for +// +openshift:compatibility-gen:level=1 +message UserOAuthAccessToken { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // clientName references the client that created this token. + optional string clientName = 2; + + // expiresIn is the seconds from CreationTime before this token expires. + optional int64 expiresIn = 3; + + // scopes is an array of the requested scopes. + repeated string scopes = 4; + + // redirectURI is the redirection associated with the token. + optional string redirectURI = 5; + + // userName is the user name associated with this token + optional string userName = 6; + + // userUID is the unique UID associated with this token + optional string userUID = 7; + + // authorizeToken contains the token that authorized this token + optional string authorizeToken = 8; + + // refreshToken is the value by which this token can be renewed. Can be blank. + optional string refreshToken = 9; + + // inactivityTimeoutSeconds is the value in seconds, from the + // CreationTimestamp, after which this token can no longer be used. + // The value is automatically incremented when the token is used. + optional int32 inactivityTimeoutSeconds = 10; +} + +// UserOAuthAccessTokenList is a collection of access tokens issued on behalf of +// the requesting user +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message UserOAuthAccessTokenList { + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated UserOAuthAccessToken items = 2; +} + diff --git a/vendor/github.com/openshift/api/oauth/v1/legacy.go b/vendor/github.com/openshift/api/oauth/v1/legacy.go new file mode 100644 index 0000000000000..65b57d2431255 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/legacy.go @@ -0,0 +1,30 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &OAuthAccessToken{}, + &OAuthAccessTokenList{}, + &OAuthAuthorizeToken{}, + &OAuthAuthorizeTokenList{}, + &OAuthClient{}, + &OAuthClientList{}, + &OAuthClientAuthorization{}, + &OAuthClientAuthorizationList{}, + &OAuthRedirectReference{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/oauth/v1/register.go b/vendor/github.com/openshift/api/oauth/v1/register.go new file mode 100644 index 0000000000000..9992dffea9bae --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/register.go @@ -0,0 +1,47 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "oauth.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OAuthAccessToken{}, + &OAuthAccessTokenList{}, + &OAuthAuthorizeToken{}, + &OAuthAuthorizeTokenList{}, + &OAuthClient{}, + &OAuthClientList{}, + &OAuthClientAuthorization{}, + &OAuthClientAuthorizationList{}, + &OAuthRedirectReference{}, + &UserOAuthAccessToken{}, + &UserOAuthAccessTokenList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/oauth/v1/types.go b/vendor/github.com/openshift/api/oauth/v1/types.go new file mode 100644 index 0000000000000..5a70b4774945e --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/types.go @@ -0,0 +1,341 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAccessToken describes an OAuth access token. 
+// The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at +// least 32 characters long. +// +// The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded +// base64-encoding (as described in RFC4648) on the hashed result. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAccessToken struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // clientName references the client that created this token. + ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` + + // expiresIn is the seconds from CreationTime before this token expires. + ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` + + // scopes is an array of the requested scopes. + Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` + + // redirectURI is the redirection associated with the token. + RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` + + // userName is the user name associated with this token + UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"` + + // userUID is the unique UID associated with this token + UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"` + + // authorizeToken contains the token that authorized this token + AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"` + + // refreshToken is the value by which this token can be renewed. Can be blank. + RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"` + + // inactivityTimeoutSeconds is the value in seconds, from the + // CreationTimestamp, after which this token can no longer be used. + // The value is automatically incremented when the token is used. + InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAuthorizeToken describes an OAuth authorization token +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAuthorizeToken struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // clientName references the client that created this token. + ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` + + // expiresIn is the seconds from CreationTime before this token expires. + ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` + + // scopes is an array of the requested scopes. 
+ Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` + + // redirectURI is the redirection associated with the token. + RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` + + // state data from request + State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"` + + // userName is the user name associated with this token + UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"` + + // userUID is the unique UID associated with this token. UserUID and UserName must both match + // for this token to be valid. + UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"` + + // codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"` + + // codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClient describes an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthClient struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // secret is the unique secret associated with a client + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` + + // additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // and for service account token validation + AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"` + + // respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"` + + // redirectURIs is the valid redirection URIs associated with a client + // +patchStrategy=merge + RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"` + + // grantMethod is a required field which determines how to handle grants for this client. + // Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"` + + // scopeRestrictions describes which scopes this client can request. Each requested scope + // is checked against each restriction. If any restriction matches, then the scope is allowed. + // If no restriction matches, then the scope is denied. 
+ ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"` + + // accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. + // 0 means no expiration. + AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"` + + // accessTokenInactivityTimeoutSeconds overrides the default token + // inactivity timeout for tokens granted to this client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. + // This value needs to be set only if the default set in configuration is + // not appropriate for this client. Valid values are: + // - 0: Tokens for this client never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty" protobuf:"varint,9,opt,name=accessTokenInactivityTimeoutSeconds"` +} + +type GrantHandlerType string + +const ( + // GrantHandlerAuto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // GrantHandlerPrompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // GrantHandlerDeny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// ScopeRestriction describes one restriction on scopes. Exactly one option must be non-nil. +type ScopeRestriction struct { + // ExactValues means the scope has to match a particular set of strings exactly + ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"` + + // clusterRole describes a set of restrictions for cluster role scoping. + ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"` +} + +// ClusterRoleScopeRestriction describes restrictions on cluster role scopes +type ClusterRoleScopeRestriction struct { + // roleNames is the list of cluster roles that can be referenced. * means anything + RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"` + // namespaces is the list of namespaces that can be referenced. * means any of them (including *) + Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` + // allowEscalation indicates whether you can request roles and their escalating resources + AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClientAuthorization describes an authorization created by an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthClientAuthorization struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // clientName references the client that created this authorization + ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` + + // userName is the user name that authorized this client + UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"` + + // userUID is the unique UID associated with this authorization. UserUID and UserName + // must both match for this authorization to be valid. + UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"` + + // scopes is an array of the granted scopes. + Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAccessTokenList is a collection of OAuth access tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAccessTokenList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of OAuth access tokens + Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAuthorizeTokenList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of OAuth authorization tokens + Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClientList is a collection of OAuth clients +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthClientList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of OAuth clients + Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClientAuthorizationList is a collection of OAuth client authorizations +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type OAuthClientAuthorizationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of OAuth client authorizations + Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthRedirectReference is a reference to an OAuth redirect object. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthRedirectReference struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The reference to a redirect object in the current namespace. + Reference RedirectReference `json:"reference,omitempty" protobuf:"bytes,2,opt,name=reference"` +} + +// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed. +type RedirectReference struct { + // The group of the target that is being referred to. + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + + // The kind of the target that is being referred to. Currently, only 'Route' is allowed. + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + + // The name of the target that is being referred to. e.g. name of the Route. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to +// the user the access token was issued for +// +openshift:compatibility-gen:level=1 +type UserOAuthAccessToken OAuthAccessToken + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserOAuthAccessTokenList is a collection of access tokens issued on behalf of +// the requesting user +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type UserOAuthAccessTokenList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []UserOAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..f1af9dc5f0690 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go @@ -0,0 +1,447 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleScopeRestriction) DeepCopyInto(out *ClusterRoleScopeRestriction) { + *out = *in + if in.RoleNames != nil { + in, out := &in.RoleNames, &out.RoleNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleScopeRestriction. +func (in *ClusterRoleScopeRestriction) DeepCopy() *ClusterRoleScopeRestriction { + if in == nil { + return nil + } + out := new(ClusterRoleScopeRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAccessToken) DeepCopyInto(out *OAuthAccessToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessToken. +func (in *OAuthAccessToken) DeepCopy() *OAuthAccessToken { + if in == nil { + return nil + } + out := new(OAuthAccessToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAccessToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAccessTokenList) DeepCopyInto(out *OAuthAccessTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthAccessToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessTokenList. +func (in *OAuthAccessTokenList) DeepCopy() *OAuthAccessTokenList { + if in == nil { + return nil + } + out := new(OAuthAccessTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAccessTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAuthorizeToken) DeepCopyInto(out *OAuthAuthorizeToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeToken. 
+func (in *OAuthAuthorizeToken) DeepCopy() *OAuthAuthorizeToken { + if in == nil { + return nil + } + out := new(OAuthAuthorizeToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAuthorizeToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAuthorizeTokenList) DeepCopyInto(out *OAuthAuthorizeTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthAuthorizeToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeTokenList. +func (in *OAuthAuthorizeTokenList) DeepCopy() *OAuthAuthorizeTokenList { + if in == nil { + return nil + } + out := new(OAuthAuthorizeTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAuthorizeTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClient) DeepCopyInto(out *OAuthClient) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AdditionalSecrets != nil { + in, out := &in.AdditionalSecrets, &out.AdditionalSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RedirectURIs != nil { + in, out := &in.RedirectURIs, &out.RedirectURIs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ScopeRestrictions != nil { + in, out := &in.ScopeRestrictions, &out.ScopeRestrictions + *out = make([]ScopeRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AccessTokenMaxAgeSeconds != nil { + in, out := &in.AccessTokenMaxAgeSeconds, &out.AccessTokenMaxAgeSeconds + *out = new(int32) + **out = **in + } + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClient. +func (in *OAuthClient) DeepCopy() *OAuthClient { + if in == nil { + return nil + } + out := new(OAuthClient) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClient) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthClientAuthorization) DeepCopyInto(out *OAuthClientAuthorization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorization. +func (in *OAuthClientAuthorization) DeepCopy() *OAuthClientAuthorization { + if in == nil { + return nil + } + out := new(OAuthClientAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientAuthorization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientAuthorizationList) DeepCopyInto(out *OAuthClientAuthorizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthClientAuthorization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorizationList. +func (in *OAuthClientAuthorizationList) DeepCopy() *OAuthClientAuthorizationList { + if in == nil { + return nil + } + out := new(OAuthClientAuthorizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientAuthorizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientList) DeepCopyInto(out *OAuthClientList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthClient, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientList. +func (in *OAuthClientList) DeepCopy() *OAuthClientList { + if in == nil { + return nil + } + out := new(OAuthClientList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRedirectReference) DeepCopyInto(out *OAuthRedirectReference) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRedirectReference. 
+func (in *OAuthRedirectReference) DeepCopy() *OAuthRedirectReference { + if in == nil { + return nil + } + out := new(OAuthRedirectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthRedirectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectReference) DeepCopyInto(out *RedirectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectReference. +func (in *RedirectReference) DeepCopy() *RedirectReference { + if in == nil { + return nil + } + out := new(RedirectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeRestriction) DeepCopyInto(out *ScopeRestriction) { + *out = *in + if in.ExactValues != nil { + in, out := &in.ExactValues, &out.ExactValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterRole != nil { + in, out := &in.ClusterRole, &out.ClusterRole + *out = new(ClusterRoleScopeRestriction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeRestriction. +func (in *ScopeRestriction) DeepCopy() *ScopeRestriction { + if in == nil { + return nil + } + out := new(ScopeRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserOAuthAccessToken) DeepCopyInto(out *UserOAuthAccessToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessToken. +func (in *UserOAuthAccessToken) DeepCopy() *UserOAuthAccessToken { + if in == nil { + return nil + } + out := new(UserOAuthAccessToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserOAuthAccessToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserOAuthAccessTokenList) DeepCopyInto(out *UserOAuthAccessTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UserOAuthAccessToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessTokenList. +func (in *UserOAuthAccessTokenList) DeepCopy() *UserOAuthAccessTokenList { + if in == nil { + return nil + } + out := new(UserOAuthAccessTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *UserOAuthAccessTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..171b5221f6952 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,171 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRoleScopeRestriction = map[string]string{ + "": "ClusterRoleScopeRestriction describes restrictions on cluster role scopes", + "roleNames": "roleNames is the list of cluster roles that can be referenced. * means anything", + "namespaces": "namespaces is the list of namespaces that can be referenced. * means any of them (including *)", + "allowEscalation": "allowEscalation indicates whether you can request roles and their escalating resources", +} + +func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { + return map_ClusterRoleScopeRestriction +} + +var map_OAuthAccessToken = map[string]string{ + "": "OAuthAccessToken describes an OAuth access token. The name of a token must be prefixed with a `sha256~` string, must not contain \"/\" or \"%\" characters and must be at least 32 characters long.\n\nThe name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded base64-encoding (as described in RFC4648) on the hashed result.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "clientName": "clientName references the client that created this token.", + "expiresIn": "expiresIn is the seconds from CreationTime before this token expires.", + "scopes": "scopes is an array of the requested scopes.", + "redirectURI": "redirectURI is the redirection associated with the token.", + "userName": "userName is the user name associated with this token", + "userUID": "userUID is the unique UID associated with this token", + "authorizeToken": "authorizeToken contains the token that authorized this token", + "refreshToken": "refreshToken is the value by which this token can be renewed. Can be blank.", + "inactivityTimeoutSeconds": "inactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. 
The value is automatically incremented when the token is used.", +} + +func (OAuthAccessToken) SwaggerDoc() map[string]string { + return map_OAuthAccessToken +} + +var map_OAuthAccessTokenList = map[string]string{ + "": "OAuthAccessTokenList is a collection of OAuth access tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of OAuth access tokens", +} + +func (OAuthAccessTokenList) SwaggerDoc() map[string]string { + return map_OAuthAccessTokenList +} + +var map_OAuthAuthorizeToken = map[string]string{ + "": "OAuthAuthorizeToken describes an OAuth authorization token\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "clientName": "clientName references the client that created this token.", + "expiresIn": "expiresIn is the seconds from CreationTime before this token expires.", + "scopes": "scopes is an array of the requested scopes.", + "redirectURI": "redirectURI is the redirection associated with the token.", + "state": "state data from request", + "userName": "userName is the user name associated with this token", + "userUID": "userUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", + "codeChallenge": "codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", + "codeChallengeMethod": "codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", +} + +func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { + return map_OAuthAuthorizeToken +} + +var map_OAuthAuthorizeTokenList = map[string]string{ + "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of OAuth authorization tokens", +} + +func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { + return map_OAuthAuthorizeTokenList +} + +var map_OAuthClient = map[string]string{ + "": "OAuthClient describes an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "secret": "secret is the unique secret associated with a client", + "additionalSecrets": "additionalSecrets holds other secrets that may be used to identify the client. 
This is useful for rotation and for service account token validation", + "respondWithChallenges": "respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", + "redirectURIs": "redirectURIs is the valid redirection URIs associated with a client", + "grantMethod": "grantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", + "scopeRestrictions": "scopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", +} + +func (OAuthClient) SwaggerDoc() map[string]string { + return map_OAuthClient +} + +var map_OAuthClientAuthorization = map[string]string{ + "": "OAuthClientAuthorization describes an authorization created by an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "clientName": "clientName references the client that created this authorization", + "userName": "userName is the user name that authorized this client", + "userUID": "userUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", + "scopes": "scopes is an array of the granted scopes.", +} + +func (OAuthClientAuthorization) SwaggerDoc() map[string]string { + return map_OAuthClientAuthorization +} + +var map_OAuthClientAuthorizationList = map[string]string{ + "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of OAuth client authorizations", +} + +func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { + return map_OAuthClientAuthorizationList +} + +var map_OAuthClientList = map[string]string{ + "": "OAuthClientList is a collection of OAuth clients\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of OAuth clients", +} + +func (OAuthClientList) SwaggerDoc() map[string]string { + return map_OAuthClientList +} + +var map_OAuthRedirectReference = map[string]string{ + "": "OAuthRedirectReference is a reference to an OAuth redirect object.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "reference": "The reference to a redirect object in the current namespace.", +} + +func (OAuthRedirectReference) SwaggerDoc() map[string]string { + return map_OAuthRedirectReference +} + +var map_RedirectReference = map[string]string{ + "": "RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.", + "group": "The group of the target that is being referred to.", + "kind": "The kind of the target that is being referred to. Currently, only 'Route' is allowed.", + "name": "The name of the target that is being referred to. e.g. the name of the Route.", +} + +func (RedirectReference) SwaggerDoc() map[string]string { + return map_RedirectReference +} + +var map_ScopeRestriction = map[string]string{ + "": "ScopeRestriction describes one restriction on scopes. Exactly one option must be non-nil.", + "literals": "ExactValues means the scope has to match a particular set of strings exactly", + "clusterRole": "clusterRole describes a set of restrictions for cluster role scoping.", +} + +func (ScopeRestriction) SwaggerDoc() map[string]string { + return map_ScopeRestriction +} + +var map_UserOAuthAccessTokenList = map[string]string{ + "": "UserOAuthAccessTokenList is a collection of access tokens issued on behalf of the requesting user\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (UserOAuthAccessTokenList) SwaggerDoc() map[string]string { + return map_UserOAuthAccessTokenList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operator/v1/Makefile b/vendor/github.com/openshift/api/operator/v1/Makefile new file mode 100644 index 0000000000000..77f5d340910c5 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="operator.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/operator/v1/doc.go b/vendor/github.com/openshift/api/operator/v1/doc.go new file mode 100644 index 0000000000000..3de961a7fc278 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go new file mode 100644 index 0000000000000..5920c4fca71d0 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -0,0 +1,82 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Authentication{}, + &AuthenticationList{}, + &DNS{}, + &DNSList{}, + &CloudCredential{}, + &CloudCredentialList{}, + &ClusterCSIDriver{}, + &ClusterCSIDriverList{}, + &Console{}, + &ConsoleList{}, + &CSISnapshotController{}, + &CSISnapshotControllerList{}, + &Etcd{}, + &EtcdList{}, + &KubeAPIServer{}, + &KubeAPIServerList{}, + &KubeControllerManager{}, + &KubeControllerManagerList{}, + &KubeScheduler{}, + &KubeSchedulerList{}, + &KubeStorageVersionMigrator{}, + &KubeStorageVersionMigratorList{}, + &MachineConfiguration{}, + &MachineConfigurationList{}, + &Network{}, + &NetworkList{}, + &OpenShiftAPIServer{}, + &OpenShiftAPIServerList{}, + &OpenShiftControllerManager{}, + &OpenShiftControllerManagerList{}, + &OLM{}, + &OLMList{}, + &ServiceCA{}, + &ServiceCAList{}, + &ServiceCatalogAPIServer{}, + &ServiceCatalogAPIServerList{}, + &ServiceCatalogControllerManager{}, + &ServiceCatalogControllerManagerList{}, + &IngressController{}, + &IngressControllerList{}, + &InsightsOperator{}, + &InsightsOperatorList{}, + &Storage{}, + &StorageList{}, + ) + + return nil +} diff 
--git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go new file mode 100644 index 0000000000000..b46448c8d68e8 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -0,0 +1,282 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// MyOperatorResource is an example operator configuration type +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:internal +type MyOperatorResource struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec MyOperatorResourceSpec `json:"spec"` + Status MyOperatorResourceStatus `json:"status"` +} + +type MyOperatorResourceSpec struct { + OperatorSpec `json:",inline"` +} + +type MyOperatorResourceStatus struct { + OperatorStatus `json:",inline"` +} + +// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$` +type ManagementState string + +var ( + // Force means that the operator is actively managing its resources but will not block an upgrade + // if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades + Force ManagementState = "Force" + // Managed means that the operator is actively managing its resources and trying to keep the component active. + // It will only upgrade the component if it is safe to do so + Managed ManagementState = "Managed" + // Unmanaged means that the operator will not take any action related to the component + // Some operators might not support this management state as it might damage the cluster and lead to manual recovery. + Unmanaged ManagementState = "Unmanaged" + // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will + // brick the cluster. + Removed ManagementState = "Removed" +) + +// OperatorSpec contains common fields operators need. It is intended to be anonymous included +// inside of the Spec struct for your particular operator. +type OperatorSpec struct { + // managementState indicates whether and how the operator should manage the component + ManagementState ManagementState `json:"managementState"` + + // logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for their operands. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel LogLevel `json:"logLevel,omitempty"` + + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". 
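// A minimal illustrative sketch, not part of the vendored file, assuming this package
// is imported as operatorv1: populating the common OperatorSpec fields described above
// with a management state and coarse-grained logging intents.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	spec := operatorv1.OperatorSpec{
		// Managed asks the operator to actively reconcile its operand.
		ManagementState: operatorv1.Managed,
		// Logging intents for the operand and for the operator itself.
		LogLevel:         operatorv1.Debug,
		OperatorLogLevel: operatorv1.Normal,
	}
	fmt.Println(spec.ManagementState, spec.LogLevel, spec.OperatorLogLevel)
}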
+ // +optional + // +kubebuilder:default=Normal + OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"` + + // unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + // Red Hat does not support the use of this field. + // Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + // Seek guidance from Red Hat support before using this field. + // Use of this property blocks cluster upgrades; it must be removed before upgrading your cluster. + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because + // it is an input to the level for the operator + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + ObservedConfig runtime.RawExtension `json:"observedConfig"` +} + +// +kubebuilder:validation:Enum="";Normal;Debug;Trace;TraceAll +type LogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + Normal LogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, and less helpful but a greater quantity of notices. In kube, this is probably glog=4. + Debug LogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. + Trace LogLevel = "Trace" + + // TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster, + // prepare for serious performance issues and massive amounts of logs. In kube, this is probably glog=8. + TraceAll LogLevel = "TraceAll" +) + +type OperatorStatus struct { + // observedGeneration is the last generation change you've dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // conditions is a list of conditions and their status + // +listType=map + // +listMapKey=type + // +optional + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // version is the level this availability applies to + // +optional + Version string `json:"version,omitempty"` + + // readyReplicas indicates how many replicas are ready and at the desired state + ReadyReplicas int32 `json:"readyReplicas"` + + // latestAvailableRevision is the deploymentID of the most recent deployment + // +optional + // +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="must only increase" + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` + + // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + // +listType=map + // +listMapKey=group + // +listMapKey=resource + // +listMapKey=namespace + // +listMapKey=name + // +optional + Generations []GenerationStatus `json:"generations,omitempty"` +} + +// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
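// A minimal illustrative sketch, not part of the vendored file; the needsReconcile
// helper is hypothetical and assumes this package is imported as operatorv1. It shows
// how the generations bookkeeping above is typically consumed.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

// needsReconcile reports whether the observed generation of a tracked resource
// differs from the one recorded in status.generations.
func needsReconcile(recorded []operatorv1.GenerationStatus, group, resource, namespace, name string, observed int64) bool {
	for _, g := range recorded {
		if g.Group == group && g.Resource == resource && g.Namespace == namespace && g.Name == name {
			return g.LastGeneration != observed
		}
	}
	// No entry recorded yet, so the resource has never been reconciled.
	return true
}

func main() {
	recorded := []operatorv1.GenerationStatus{
		{Group: "apps", Resource: "deployments", Namespace: "openshift-console", Name: "console", LastGeneration: 3},
	}
	fmt.Println(needsReconcile(recorded, "apps", "deployments", "openshift-console", "console", 4)) // true
}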
+type GenerationStatus struct { + // group is the group of the thing you're tracking + // +required + Group string `json:"group"` + + // resource is the resource type of the thing you're tracking + // +required + Resource string `json:"resource"` + + // namespace is where the thing you're tracking is + // +required + Namespace string `json:"namespace"` + + // name is the name of the thing you're tracking + // +required + Name string `json:"name"` + + // TODO: Add validation for lastGeneration. The value for this field should generally increase, except when the associated + // resource has been deleted and re-created. To accurately validate this field, we should introduce a new UID field and only + // enforce an increasing value in lastGeneration when the UID remains unchanged. A change in the UID indicates that the resource + // was re-created, allowing the lastGeneration value to reset or decrease. + + // lastGeneration is the last generation of the workload controller involved + LastGeneration int64 `json:"lastGeneration"` + + // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + Hash string `json:"hash"` +} + +var ( + // Available indicates that the operand is present and accessible in the cluster + OperatorStatusTypeAvailable = "Available" + // Progressing indicates that the operator is trying to transition the operand to a different state + OperatorStatusTypeProgressing = "Progressing" + // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent + OperatorStatusTypeDegraded = "Degraded" + // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the + // current and desired states. + OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" + // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO + OperatorStatusTypeUpgradeable = "Upgradeable" +) + +// OperatorCondition is just the standard condition fields. +type OperatorCondition struct { + // type of condition in CamelCase or in foo.example.com/CamelCase. + // --- + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + // useful (see .node.status.conditions), the ability to deconflict is important. + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // status of the condition, one of True, False, Unknown. + // +required + // +kubebuilder:validation:Enum=True;False;Unknown + Status ConditionStatus `json:"status"` + + // lastTransitionTime is the last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
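// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 and
// metav1 import aliases: recording an OperatorCondition using the status type constants
// above; lastTransitionTime is set when the status flips, per the field contract.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cond := operatorv1.OperatorCondition{
		Type:   operatorv1.OperatorStatusTypeAvailable,
		Status: operatorv1.ConditionTrue,
		// Record when the condition last changed from one status to another.
		LastTransitionTime: metav1.Now(),
		Reason:             "AsExpected",
		Message:            "the operand is present and reachable",
	}
	fmt.Println(cond.Type, cond.Status)
}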
+ // +required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + // reason is a brief machine-readable explanation for the condition's last transition. + Reason string `json:"reason,omitempty"` + + // message is a human-readable description of the details of the last transition. + Message string `json:"message,omitempty"` +} + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// StaticPodOperatorSpec is the spec for controllers that manage static pods. +type StaticPodOperatorSpec struct { + OperatorSpec `json:",inline"` + + // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + // this time instead of failing again on the same config. + ForceRedeploymentReason string `json:"forceRedeploymentReason"` + + // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"` + // succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"` +} + +// StaticPodOperatorStatus is the status for controllers that manage static pods. There are different needs because individual +// node status must be tracked. +type StaticPodOperatorStatus struct { + OperatorStatus `json:",inline"` + + // latestAvailableRevisionReason describes the detailed reason for the most recent deployment + // +optional + LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"` + + // nodeStatuses track the deployment values and errors across individual nodes + // +listType=map + // +listMapKey=nodeName + // +optional + NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` +} + +// NodeStatus provides information about the current state of a particular node managed by this operator. +type NodeStatus struct { + // nodeName is the name of the node + // +required + NodeName string `json:"nodeName"` + + // currentRevision is the generation of the most recently successful deployment + CurrentRevision int32 `json:"currentRevision"` + // targetRevision is the generation of the deployment we're trying to apply + TargetRevision int32 `json:"targetRevision,omitempty"` + + // lastFailedRevision is the generation of the deployment we tried and failed to deploy. + LastFailedRevision int32 `json:"lastFailedRevision,omitempty"` + // lastFailedTime is the time when the last failed revision most recently failed. + LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"` + // lastFailedReason is a machine readable failure reason string. + LastFailedReason string `json:"lastFailedReason,omitempty"` + // lastFailedCount is how often the installer pod of the last failed revision failed. + LastFailedCount int `json:"lastFailedCount,omitempty"` + // lastFallbackCount is how often a fallback to a previous revision happened. + LastFallbackCount int `json:"lastFallbackCount,omitempty"` + // lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. 
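// A minimal illustrative sketch, not part of the vendored file; the rolloutState
// helper is hypothetical and assumes operatorv1 as the import alias. It shows how the
// per-node revision bookkeeping above can summarize a static pod rollout.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func rolloutState(n operatorv1.NodeStatus) string {
	switch {
	case n.LastFailedRevision != 0 && n.LastFailedRevision == n.TargetRevision:
		// The revision we are currently targeting has already failed on this node.
		return "failed"
	case n.TargetRevision != 0 && n.TargetRevision != n.CurrentRevision:
		// A newer revision is being applied but has not succeeded yet.
		return "progressing"
	default:
		return "settled"
	}
}

func main() {
	n := operatorv1.NodeStatus{NodeName: "master-0", CurrentRevision: 7, TargetRevision: 8}
	fmt.Println(n.NodeName, rolloutState(n)) // master-0 progressing
}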
+ // +listType=atomic + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go new file mode 100644 index 0000000000000..bf103f19bbb41 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -0,0 +1,68 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=authentications,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=authentication,operatorOrdering=01 +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true + +// Authentication provides information to configure an operator to manage authentication. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Authentication struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec AuthenticationSpec `json:"spec,omitempty"` + // +optional + Status AuthenticationStatus `json:"status,omitempty"` +} + +type AuthenticationSpec struct { + OperatorSpec `json:",inline"` +} + +type AuthenticationStatus struct { + // oauthAPIServer holds status specific only to oauth-apiserver + // +optional + OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` + + OperatorStatus `json:",inline"` +} + +type OAuthAPIServerStatus struct { + // latestAvailableRevision is the latest revision used as suffix of revisioned + // secrets like encryption-config. A new revision causes a new deployment of pods. + // +optional + // +kubebuilder:validation:Minimum=0 + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go new file mode 100644 index 0000000000000..b6ef52e937783 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go @@ -0,0 +1,92 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=cloudcredentials,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/692 +// +openshift:capability=CloudCredential +// +openshift:file-pattern=cvoRunLevel=0000_40,operatorName=cloud-credential,operatorOrdering=00 + +// CloudCredential provides a means to configure an operator to manage CredentialsRequests. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type CloudCredential struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec CloudCredentialSpec `json:"spec"` + // +optional + Status CloudCredentialStatus `json:"status"` +} + +// CloudCredentialsMode is the specified mode the cloud-credential-operator +// should reconcile CredentialsRequest with +// +kubebuilder:validation:Enum="";Manual;Mint;Passthrough +type CloudCredentialsMode string + +const ( + // CloudCredentialsModeManual tells cloud-credential-operator to not reconcile any CredentialsRequests + // (primarily used for the disconnected VPC use-cases). + CloudCredentialsModeManual CloudCredentialsMode = "Manual" + + // CloudCredentialsModeMint tells cloud-credential-operator to reconcile all CredentialsRequests + // by minting new users/credentials. + CloudCredentialsModeMint CloudCredentialsMode = "Mint" + + // CloudCredentialsModePassthrough tells cloud-credential-operator to reconcile all CredentialsRequests + // by copying the cloud-specific secret data. + CloudCredentialsModePassthrough CloudCredentialsMode = "Passthrough" + + // CloudCredentialsModeDefault puts CCO into the default mode of operation (per-cloud/platform defaults): + // AWS/Azure/GCP: dynamically determine cluster's cloud credential capabilities to affect + // processing of CredentialsRequests + // All other clouds/platforms (OpenStack, oVirt, vSphere, etc): run in "passthrough" mode + CloudCredentialsModeDefault CloudCredentialsMode = "" +) + +// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. +type CloudCredentialSpec struct { + OperatorSpec `json:",inline"` + // credentialsMode allows informing CCO that it should not attempt to dynamically + // determine the root cloud credentials capabilities, and it should just run in + // the specified mode. + // It also allows putting the operator into "manual" mode if desired. 
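// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 as
// the import alias: pinning the cloud-credential-operator to one of the modes
// enumerated above instead of letting it probe credential capabilities.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	spec := operatorv1.CloudCredentialSpec{
		OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed},
		// Manual disables reconciliation of CredentialsRequests entirely.
		CredentialsMode: operatorv1.CloudCredentialsModeManual,
	}
	fmt.Println(spec.CredentialsMode)
}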
+ // Leaving the field in default mode runs CCO so that the cluster's cloud credentials + // will be dynamically probed for capabilities (on supported clouds/platforms). + // Supported modes: + // AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" + // Others: Do not set value as other platforms only support running in "Passthrough" + // +optional + CredentialsMode CloudCredentialsMode `json:"credentialsMode,omitempty"` +} + +// CloudCredentialStatus defines the observed status of the cloud-credential-operator. +type CloudCredentialStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type CloudCredentialList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []CloudCredential `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go new file mode 100644 index 0000000000000..f0d190e6db33e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_config.go @@ -0,0 +1,60 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=configs,scope=Cluster,categories=coreoperators +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/612 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 + +// Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components +// on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud based clusters +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Config struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Config Operator. + // +required + Spec ConfigSpec `json:"spec"` + + // status defines the observed status of the Config Operator. + // +optional + Status ConfigStatus `json:"status"` +} + +type ConfigSpec struct { + OperatorSpec `json:",inline"` +} + +type ConfigStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []Config `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go new file mode 100644 index 0000000000000..68d9daa4501d5 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -0,0 +1,466 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + authorizationv1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=consoles,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/486 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=console,operatorOrdering=01 + +// Console provides a means to configure an operator to manage the console. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Console struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec ConsoleSpec `json:"spec,omitempty"` + // +optional + Status ConsoleStatus `json:"status,omitempty"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + OperatorSpec `json:",inline"` + // customization is used to optionally provide a small set of + // customization options to the web console. + // +optional + Customization ConsoleCustomization `json:"customization"` + // providers contains configuration for using specific service providers. + Providers ConsoleProviders `json:"providers"` + // route contains the hostname and secret reference that contains the serving certificate. + // If a custom route is specified, a new route will be created with the + // provided hostname, under which console will be available. + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. + // The default console route will be maintained to reserve the default hostname + // for console if the custom route is removed. + // If not specified, the default route will be used. + // DEPRECATED + // +optional + Route ConsoleConfigRoute `json:"route"` + // plugins defines a list of enabled console plugin names. + // +optional + Plugins []string `json:"plugins,omitempty"` + // ingress allows configuring the alternative ingress for the console. + // This field is intended for clusters without ingress capability, + // where access to routes is not possible. + // +optional + Ingress Ingress `json:"ingress"` +} + +// ConsoleConfigRoute holds information on external route access to console. +// DEPRECATED +type ConsoleConfigRoute struct { + // hostname is the desired custom domain under which console will be available. 
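// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 as
// the import alias: a ConsoleSpec enabling a plugin and the deprecated custom route
// described below; the plugin name and hostname are illustrative values.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	spec := operatorv1.ConsoleSpec{
		OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed},
		// Console plugin names to enable; each plugin is installed separately.
		Plugins: []string{"monitoring-plugin"},
		Route: operatorv1.ConsoleConfigRoute{
			Hostname: "console.apps.example.com", // illustrative custom hostname
		},
	}
	fmt.Println(spec.Plugins, spec.Route.Hostname)
}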
+ Hostname string `json:"hostname"` + // secret points to a secret in the openshift-config namespace that contains a custom + // certificate and key and needs to be created manually by the cluster admin. + // The referenced Secret is required to contain the following key value pairs: + // - "tls.crt" - specifies the custom certificate + // - "tls.key" - specifies the private key of the custom certificate + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + Secret configv1.SecretNameReference `json:"secret"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + OperatorStatus `json:",inline"` +} + +// ConsoleProviders defines a list of optional additional providers of +// functionality to the console. +type ConsoleProviders struct { + // statuspage contains the ID of the statuspage.io page that provides status info. + // +optional + Statuspage *StatuspageProvider `json:"statuspage,omitempty"` +} + +// StatuspageProvider provides identity for a statuspage account. +type StatuspageProvider struct { + // pageID is the unique ID assigned by Statuspage for your page. This must be a public page. + PageID string `json:"pageID"` +} + +// ConsoleCapabilityName defines the name of a UI capability in the console UI. +type ConsoleCapabilityName string + +const ( + // lightspeedButton is the name for the Lightspeed button HTML element. + LightspeedButton ConsoleCapabilityName = "LightspeedButton" + + // gettingStartedBanner is the name of the 'Getting started resources' banner in the console UI Overview page. + GettingStartedBanner ConsoleCapabilityName = "GettingStartedBanner" +) + +// CapabilityState defines the state of the capability in the console UI. +type CapabilityState string + +const ( + // "Enabled" means that the capability will be rendered in the console UI. + CapabilityEnabled CapabilityState = "Enabled" + // "Disabled" means that the capability will not be rendered in the console UI. + CapabilityDisabled CapabilityState = "Disabled" +) + +// CapabilityVisibility defines the criteria to enable/disable a capability. +// +union +type CapabilityVisibility struct { + // state defines if the capability is enabled or disabled in the console UI. + // Enabling the capability in the console UI is represented by the "Enabled" value. + // Disabling the capability in the console UI is represented by the "Disabled" value. + // +unionDiscriminator + // +kubebuilder:validation:Enum:="Enabled";"Disabled" + // +required + State CapabilityState `json:"state"` +} + +// Capability defines a UI capability and its state in the console UI. +type Capability struct { + // name is the unique name of a capability. + // Available capabilities are LightspeedButton and GettingStartedBanner. + // +kubebuilder:validation:Enum:="LightspeedButton";"GettingStartedBanner" + // +required + Name ConsoleCapabilityName `json:"name"` + // visibility defines the visibility state of the capability. + // +required + Visibility CapabilityVisibility `json:"visibility"` +} + +// ConsoleCustomization defines a list of optional configuration for the console UI. +type ConsoleCustomization struct { + // capabilities defines an array of capabilities that can be interacted with in the console UI. + // Each capability defines a visual state that controls whether the console renders it in the UI. + // Available capabilities are LightspeedButton and GettingStartedBanner. 
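// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 as
// the import alias: declaring the two capabilities named above, one rendered and one
// hidden, following the name/visibility contract.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	caps := []operatorv1.Capability{
		{
			Name:       operatorv1.LightspeedButton,
			Visibility: operatorv1.CapabilityVisibility{State: operatorv1.CapabilityEnabled},
		},
		{
			Name:       operatorv1.GettingStartedBanner,
			Visibility: operatorv1.CapabilityVisibility{State: operatorv1.CapabilityDisabled},
		},
	}
	fmt.Println(len(caps), "capabilities configured")
}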
+ // Each of the available capabilities may appear only once in the list. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=2 + // +listType=map + // +listMapKey=name + // +optional + Capabilities []Capability `json:"capabilities,omitempty"` + // brand is the default branding of the web console, which can be overridden by + // providing this field. There is a limited set of specific brand options. + // This field controls elements of the console such as the logo. + // An invalid value will prevent a console rollout. + // +kubebuilder:validation:Enum:=openshift;okd;online;ocp;dedicated;azure;OpenShift;OKD;Online;OCP;Dedicated;Azure;ROSA + Brand Brand `json:"brand,omitempty"` + // documentationBaseURL links to external documentation shown in various sections + // of the web console. Providing documentationBaseURL will override the default + // documentation URL. + // An invalid value will prevent a console rollout. + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$` + DocumentationBaseURL string `json:"documentationBaseURL,omitempty"` + // customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog + // instead of the normal OpenShift product name. + // +optional + CustomProductName string `json:"customProductName,omitempty"` + // customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a + // ConfigMap in the openshift-config namespace. This can be created with a command like + // 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. + // Image size must be less than 1 MB due to constraints on the ConfigMap size. + // The ConfigMap key should include a file extension so that the console serves the file + // with the correct MIME type. + // Recommended logo specifications: + // Dimensions: Max height of 68px and max width of 200px + // SVG format preferred + // +optional + CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` + // developerCatalog allows configuring the shown developer catalog categories (filters) and types (sub-catalogs). + // +optional + DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"` + // projectAccess allows customizing the available list of ClusterRoles in the Developer perspective + // Project access page which can be used by a project admin to specify roles to other users and + // restrict access within the project. If set, the list will replace the default ClusterRole options. + // +optional + ProjectAccess ProjectAccess `json:"projectAccess,omitempty"` + // quickStarts allows customization of available ConsoleQuickStart resources in the console. + // +optional + QuickStarts QuickStarts `json:"quickStarts,omitempty"` + // addPage allows customizing actions on the Add page in the developer perspective. + // +optional + AddPage AddPage `json:"addPage,omitempty"` + // perspectives allows enabling/disabling of perspective(s) that the user can see in the Perspective switcher dropdown. + // +listType=map + // +listMapKey=id + // +optional + Perspectives []Perspective `json:"perspectives"` +} + +// ProjectAccess contains options for project access roles +type ProjectAccess struct { + // availableClusterRoles is the list of ClusterRole names that are assignable to users + // through the project access tab. 
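// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 as
// the import alias: replacing the default project-access role options with a custom
// ClusterRole list; the role names are illustrative.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	access := operatorv1.ProjectAccess{
		// Only these ClusterRoles will be offered on the Project access page.
		AvailableClusterRoles: []string{"admin", "edit", "view"},
	}
	fmt.Println(access.AvailableClusterRoles)
}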
+ // +optional + AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"` +} + +// CatalogTypesState defines the state of the catalog types based on which the types will be enabled or disabled. +type CatalogTypesState string + +const ( + CatalogTypeEnabled CatalogTypesState = "Enabled" + CatalogTypeDisabled CatalogTypesState = "Disabled" +) + +// DeveloperConsoleCatalogTypes defines the state of the sub-catalog types. +// +kubebuilder:validation:XValidation:rule="self.state == 'Enabled' ? true : !has(self.enabled)",message="enabled is forbidden when state is not Enabled" +// +kubebuilder:validation:XValidation:rule="self.state == 'Disabled' ? true : !has(self.disabled)",message="disabled is forbidden when state is not Disabled" +// +union +type DeveloperConsoleCatalogTypes struct { + // state defines if a list of catalog types should be enabled or disabled. + // +unionDiscriminator + // +kubebuilder:validation:Enum:="Enabled";"Disabled"; + // +kubebuilder:default:="Enabled" + // +default="Enabled" + // +required + State CatalogTypesState `json:"state,omitempty"` + // enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. + // Types (sub-catalogs) are added via console plugins; the available types (sub-catalog IDs) are available + // in the console on the cluster configuration page, or when editing the YAML in the console. + // Example: "Devfile", "HelmChart", "BuilderImage" + // If the list is non-empty, a new type will not be shown to the user until it is added to the list. + // If the list is empty, the complete developer catalog will be shown. + // +listType=set + // +unionMember,optional + Enabled *[]string `json:"enabled,omitempty"` + // disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users. + // Types (sub-catalogs) are added via console plugins; the available types (sub-catalog IDs) are available + // in the console on the cluster configuration page, or when editing the YAML in the console. + // Example: "Devfile", "HelmChart", "BuilderImage" + // If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden. + // +listType=set + // +unionMember,optional + Disabled *[]string `json:"disabled,omitempty"` +} + +// DeveloperConsoleCatalogCustomization allows cluster admins to configure the developer catalog. +type DeveloperConsoleCatalogCustomization struct { + // categories defines the categories that are shown in the developer catalog. + // +optional + Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"` + // types allows enabling or disabling of sub-catalog types that the user can see in the Developer catalog. + // When omitted, all the sub-catalog types will be shown. + // +optional + Types DeveloperConsoleCatalogTypes `json:"types,omitempty"` +} + +// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. +type DeveloperConsoleCatalogCategoryMeta struct { + // id is an identifier used in the URL to enable deep linking in the console. + // ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` + // +required + ID string `json:"id"` + // label defines a category display label. It is required and must have 1-64 characters. 
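// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 as
// the import alias: a top-level catalog category with one subcategory, following the
// id/label/tags contract above; the ids, labels, and tags are illustrative.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	category := operatorv1.DeveloperConsoleCatalogCategory{
		DeveloperConsoleCatalogCategoryMeta: operatorv1.DeveloperConsoleCatalogCategoryMeta{
			ID:    "languages",
			Label: "Languages",
			Tags:  []string{"go", "java"},
		},
		Subcategories: []operatorv1.DeveloperConsoleCatalogCategoryMeta{
			{ID: "go", Label: "Go", Tags: []string{"go", "golang"}},
		},
	}
	fmt.Println(category.ID, len(category.Subcategories))
}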
+ // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +required + Label string `json:"label"` + // tags is a list of strings that will match the category. A selected category + // shows all items that have at least one overlapping tag between the category and the item. + // +optional + Tags []string `json:"tags,omitempty"` +} + +// DeveloperConsoleCatalogCategory defines a category for the developer console catalog. +type DeveloperConsoleCatalogCategory struct { + // defines top level category ID, label and filter tags. + DeveloperConsoleCatalogCategoryMeta `json:",inline"` + // subcategories defines a list of child categories. + // +optional + Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"` +} + +// QuickStarts allow cluster admins to customize available ConsoleQuickStart resources. +type QuickStarts struct { + // disabled is a list of ConsoleQuickStart resource names that are not shown to users. + // +optional + Disabled []string `json:"disabled,omitempty"` +} + +// AddPage allows customizing actions on the Add page in the developer perspective. +type AddPage struct { + // disabledActions is a list of actions that are not shown to users. + // Each action in the list is represented by its ID. + // +kubebuilder:validation:MinItems=1 + // +optional + DisabledActions []string `json:"disabledActions,omitempty"` +} + +// PerspectiveState defines the visibility state of the perspective. "Enabled" means the perspective is shown. +// "Disabled" means the perspective is hidden. +// "AccessReview" means an access review check is required to show or hide the perspective. +type PerspectiveState string + +const ( + PerspectiveEnabled PerspectiveState = "Enabled" + PerspectiveDisabled PerspectiveState = "Disabled" + PerspectiveAccessReview PerspectiveState = "AccessReview" +) + +// ResourceAttributesAccessReview defines the visibility of the perspective depending on the access review checks. +// `required` and `missing` can work together, especially in the case where the cluster admin +// wants to show another perspective to users without specific permissions. At least one of `required` and `missing` must be non-empty. +// +kubebuilder:validation:MinProperties:=1 +type ResourceAttributesAccessReview struct { + // required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list. + // +optional + Required []authorizationv1.ResourceAttributes `json:"required"` + // missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list. + // +optional + Missing []authorizationv1.ResourceAttributes `json:"missing"` +} + +// PerspectiveVisibility defines the criteria to show/hide a perspective. +// +kubebuilder:validation:XValidation:rule="self.state == 'AccessReview' ? has(self.accessReview) : !has(self.accessReview)",message="accessReview configuration is required when state is AccessReview, and forbidden otherwise" +// +union +type PerspectiveVisibility struct { + // state defines whether the perspective is enabled or disabled, or whether an access review check is required. 
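// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 and
// authorizationv1 import aliases: gating a perspective on an access review, per the
// required/missing semantics above; the permission check is illustrative.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	authorizationv1 "k8s.io/api/authorization/v1"
)

func main() {
	vis := operatorv1.PerspectiveVisibility{
		State: operatorv1.PerspectiveAccessReview,
		AccessReview: &operatorv1.ResourceAttributesAccessReview{
			// Show the perspective only to users who can list namespaces.
			Required: []authorizationv1.ResourceAttributes{
				{Verb: "list", Resource: "namespaces"},
			},
		},
	}
	fmt.Println(vis.State)
}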
+ // +unionDiscriminator + // +kubebuilder:validation:Enum:="Enabled";"Disabled";"AccessReview" + // +required + State PerspectiveState `json:"state"` + // accessReview defines required and missing access review checks. + // +optional + AccessReview *ResourceAttributesAccessReview `json:"accessReview,omitempty"` +} + +// Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown +// +kubebuilder:validation:XValidation:rule="has(self.id) && self.id != 'dev'? !has(self.pinnedResources) : true",message="pinnedResources is allowed only for dev and forbidden for other perspectives" +// +optional +type Perspective struct { + // id defines the id of the perspective. + // Example: "dev", "admin". + // The available perspective ids can be found in the code snippet section next to the yaml editor. + // Incorrect or unknown ids will be ignored. + // +required + ID string `json:"id"` + // visibility defines the state of perspective along with access review checks if needed for that perspective. + // +required + Visibility PerspectiveVisibility `json:"visibility"` + // pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. + // The list of available Kubernetes resources could be read via `kubectl api-resources`. + // The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation. + // Incorrect or unknown resources will be ignored. + // +kubebuilder:validation:MaxItems=100 + // +optional + PinnedResources *[]PinnedResourceReference `json:"pinnedResources,omitempty"` +} + +// PinnedResourceReference includes the group, version and type of resource +type PinnedResourceReference struct { + // group is the API Group of the Resource. + // Enter empty string for the core group. + // This value should consist of only lowercase alphanumeric characters, hyphens and periods. + // Example: "", "apps", "build.openshift.io", etc. + // +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" + // +required + Group string `json:"group"` + // version is the API Version of the Resource. + // This value should consist of only lowercase alphanumeric characters. + // Example: "v1", "v1beta1", etc. + // +kubebuilder:validation:Pattern:="^[a-z0-9]+$" + // +required + Version string `json:"version"` + // resource is the type that is being referenced. + // It is normally the plural form of the resource kind in lowercase. + // This value should consist of only lowercase alphanumeric characters and hyphens. + // Example: "deployments", "deploymentconfigs", "pods", etc. + // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + // +required + Resource string `json:"resource"` +} + +// Brand is a specific supported brand within the console. 
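// A minimal illustrative sketch, not part of the vendored file, assuming operatorv1 as
// the import alias: enabling the dev perspective and pinning deployments to its
// navigation, per the pinnedResources rules above.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	pinned := []operatorv1.PinnedResourceReference{
		{Group: "apps", Version: "v1", Resource: "deployments"},
	}
	p := operatorv1.Perspective{
		ID:              "dev", // pinnedResources is only honored for the dev perspective
		Visibility:      operatorv1.PerspectiveVisibility{State: operatorv1.PerspectiveEnabled},
		PinnedResources: &pinned,
	}
	fmt.Println(p.ID, len(*p.PinnedResources))
}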
+type Brand string + +const ( + // Legacy branding for OpenShift + BrandOpenShiftLegacy Brand = "openshift" + // Legacy branding for The Origin Community Distribution of Kubernetes + BrandOKDLegacy Brand = "okd" + // Legacy branding for OpenShift Online + BrandOnlineLegacy Brand = "online" + // Legacy branding for OpenShift Container Platform + BrandOCPLegacy Brand = "ocp" + // Legacy branding for OpenShift Dedicated + BrandDedicatedLegacy Brand = "dedicated" + // Legacy branding for Azure Red Hat OpenShift + BrandAzureLegacy Brand = "azure" + // Branding for OpenShift + BrandOpenShift Brand = "OpenShift" + // Branding for The Origin Community Distribution of Kubernetes + BrandOKD Brand = "OKD" + // Branding for OpenShift Online + BrandOnline Brand = "Online" + // Branding for OpenShift Container Platform + BrandOCP Brand = "OCP" + // Branding for OpenShift Dedicated + BrandDedicated Brand = "Dedicated" + // Branding for Azure Red Hat OpenShift + BrandAzure Brand = "Azure" + // Branding for Red Hat OpenShift Service on AWS + BrandROSA Brand = "ROSA" +) + +// Ingress allows cluster admins to configure an alternative ingress for the console. +type Ingress struct { + // consoleURL is a URL to be used as the base console address. + // If not specified, the console route hostname will be used. + // This field is required for clusters without ingress capability, + // where access to routes is not possible. + // Make sure that an appropriate ingress is set up at this URL. + // The console operator will monitor the URL and may go degraded + // if it's unreachable for an extended period. + // Must use the HTTPS scheme. + // +optional + // +kubebuilder:validation:XValidation:rule="size(self) == 0 || isURL(self)",message="console url must be a valid absolute URL" + // +kubebuilder:validation:XValidation:rule="size(self) == 0 || url(self).getScheme() == 'https'",message="console url scheme must be https" + // +kubebuilder:validation:MaxLength=1024 + ConsoleURL string `json:"consoleURL"` + // clientDownloadsURL is a URL to be used as the address to download client binaries. + // If not specified, the downloads route hostname will be used. + // This field is required for clusters without ingress capability, + // where access to routes is not possible. + // The console operator will monitor the URL and may go degraded + // if it's unreachable for an extended period. + // Must use the HTTPS scheme. + // +optional + // +kubebuilder:validation:XValidation:rule="size(self) == 0 || isURL(self)",message="client downloads url must be a valid absolute URL" + // +kubebuilder:validation:XValidation:rule="size(self) == 0 || url(self).getScheme() == 'https'",message="client downloads url scheme must be https" + // +kubebuilder:validation:MaxLength=1024 + ClientDownloadsURL string `json:"clientDownloadsURL"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Console `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
new file mode 100644
index 0000000000000..731323750a4d0
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
@@ -0,0 +1,393 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ClusterCSIDriver is used to manage and configure the CSI driver installed by default
+// in OpenShift. An example configuration may look like:
+// apiVersion: operator.openshift.io/v1
+// kind: "ClusterCSIDriver"
+// metadata:
+//   name: "ebs.csi.aws.com"
+// spec:
+//   logLevel: Debug

+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clustercsidrivers,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/701
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=csi-driver,operatorOrdering=01
+
+// ClusterCSIDriver object allows management and configuration of a CSI driver operator
+// installed by default in OpenShift. The name of the object must be the name of the CSI driver
+// it operates. See the CSIDriverName type for the list of allowed values.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterCSIDriver struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +required
+ Spec ClusterCSIDriverSpec `json:"spec"`
+
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ClusterCSIDriverStatus `json:"status"`
+}
+
+// CSIDriverName is the name of the CSI driver
+type CSIDriverName string
+
+// +kubebuilder:validation:Enum="";Managed;Unmanaged;Removed
+// StorageClassStateName defines various configuration states for storageclass management
+// and reconciliation by the CSI operator.
+type StorageClassStateName string
+
+const (
+ // ManagedStorageClass means that the operator is actively managing its storage classes.
+ // Most manual changes made by a cluster admin to a storageclass will be wiped away by the CSI
+ // operator if StorageClassState is set to Managed.
+ ManagedStorageClass StorageClassStateName = "Managed"
+ // UnmanagedStorageClass means that the operator is not actively managing storage classes.
+ // If StorageClassState is Unmanaged then the CSI operator will not be actively reconciling a storage class
+ // it previously created. This can be useful if a cluster admin wants to modify a storage class installed
+ // by the CSI operator.
+ UnmanagedStorageClass StorageClassStateName = "Unmanaged"
+ // RemovedStorageClass instructs the operator to remove the storage class.
+ // If StorageClassState is Removed - the CSI operator will delete storage classes it created
+ // previously. This can be useful in clusters where cluster admins want to prevent
+ // creation of dynamically provisioned volumes but still need the rest of the features
+ // provided by the CSI operator and driver.
+ RemovedStorageClass StorageClassStateName = "Removed"
+)
+
+// If you are adding a new driver name here, ensure that the 0000_50_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with the new driver name.
+const (
+ AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com"
+ AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com"
+ AzureDiskCSIDriver CSIDriverName = "disk.csi.azure.com"
+ AzureFileCSIDriver CSIDriverName = "file.csi.azure.com"
+ GCPFilestoreCSIDriver CSIDriverName = "filestore.csi.storage.gke.io"
+ GCPPDCSIDriver CSIDriverName = "pd.csi.storage.gke.io"
+ CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org"
+ VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com"
+ ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org"
+ OvirtCSIDriver CSIDriverName = "csi.ovirt.org"
+ KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io"
+ SharedResourcesCSIDriver CSIDriverName = "csi.sharedresource.openshift.io"
+ AlibabaDiskCSIDriver CSIDriverName = "diskplugin.csi.alibabacloud.com"
+ IBMVPCBlockCSIDriver CSIDriverName = "vpc.block.csi.ibm.io"
+ IBMPowerVSBlockCSIDriver CSIDriverName = "powervs.csi.ibm.com"
+ SecretsStoreCSIDriver CSIDriverName = "secrets-store.csi.k8s.io"
+ SambaCSIDriver CSIDriverName = "smb.csi.k8s.io"
+)
+
+// ClusterCSIDriverSpec is the desired behavior of the CSI driver operator
+type ClusterCSIDriverSpec struct {
+ OperatorSpec `json:",inline"`
+ // storageClassState determines if the CSI operator should create and manage storage classes.
+ // If this field value is empty or Managed - the CSI operator will continuously reconcile
+ // the storage class and create it if necessary.
+ // If this field value is Unmanaged - the CSI operator will not reconcile any previously created
+ // storage class.
+ // If this field value is Removed - the CSI operator will delete the storage class it created previously.
+ // When omitted, this means the user has no opinion and the platform chooses a reasonable default,
+ // which is subject to change over time.
+ // The current default behaviour is Managed.
+ // +optional
+ StorageClassState StorageClassStateName `json:"storageClassState,omitempty"`
+
+ // driverConfig can be used to specify platform-specific driver configuration.
+ // When omitted, this means no opinion and the platform is left to choose reasonable
+ // defaults. These defaults are subject to change over time.
+ // +optional
+ DriverConfig CSIDriverConfigSpec `json:"driverConfig"`
+}
+
+// CSIDriverType indicates the type of CSI driver being configured.
+// +kubebuilder:validation:Enum="";AWS;Azure;GCP;IBMCloud;vSphere
+type CSIDriverType string
+
+const (
+ AWSDriverType CSIDriverType = "AWS"
+ AzureDriverType CSIDriverType = "Azure"
+ GCPDriverType CSIDriverType = "GCP"
+ IBMCloudDriverType CSIDriverType = "IBMCloud"
+ VSphereDriverType CSIDriverType = "vSphere"
+)
+
+// CSIDriverConfigSpec defines configuration spec that can be
+// used to optionally configure a specific CSI Driver.
+// +kubebuilder:validation:XValidation:rule="has(self.driverType) && self.driverType == 'IBMCloud' ? has(self.ibmcloud) : !has(self.ibmcloud)",message="ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise"
+// +union
+type CSIDriverConfigSpec struct {
+ // driverType indicates the type of CSI driver that the
+ // driverConfig is being applied to.
+ // Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. + // Consumers should treat unknown values as a NO-OP. + // +required + // +unionDiscriminator + DriverType CSIDriverType `json:"driverType"` + + // aws is used to configure the AWS CSI driver. + // +optional + AWS *AWSCSIDriverConfigSpec `json:"aws,omitempty"` + + // azure is used to configure the Azure CSI driver. + // +optional + Azure *AzureCSIDriverConfigSpec `json:"azure,omitempty"` + + // gcp is used to configure the GCP CSI driver. + // +optional + GCP *GCPCSIDriverConfigSpec `json:"gcp,omitempty"` + + // ibmcloud is used to configure the IBM Cloud CSI driver. + // +optional + IBMCloud *IBMCloudCSIDriverConfigSpec `json:"ibmcloud,omitempty"` + + // vSphere is used to configure the vsphere CSI driver. + // +optional + VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"` +} + +// AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver. +type AWSCSIDriverConfigSpec struct { + // kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, + // rather than the default KMS key used by AWS. + // The value may be either the ARN or Alias ARN of a KMS key. + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$` + // +optional + KMSKeyARN string `json:"kmsKeyARN,omitempty"` + + // efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver. + // +openshift:enable:FeatureGate=AWSEFSDriverVolumeMetrics + // +optional + EFSVolumeMetrics *AWSEFSVolumeMetrics `json:"efsVolumeMetrics,omitempty"` +} + +// AWSEFSVolumeMetricsState defines the modes for collecting volume metrics in the AWS EFS CSI Driver. +// This can either enable recursive collection of volume metrics or disable metric collection entirely. +// +kubebuilder:validation:Enum:="RecursiveWalk";"Disabled" +type AWSEFSVolumeMetricsState string + +const ( + // AWSEFSVolumeMetricsRecursiveWalk indicates that volume metrics collection in the AWS EFS CSI Driver + // is performed by recursively walking through the files in the volume. + AWSEFSVolumeMetricsRecursiveWalk AWSEFSVolumeMetricsState = "RecursiveWalk" + + // AWSEFSVolumeMetricsDisabled indicates that volume metrics collection in the AWS EFS CSI Driver is disabled. + AWSEFSVolumeMetricsDisabled AWSEFSVolumeMetricsState = "Disabled" +) + +// AWSEFSVolumeMetrics defines the configuration for volume metrics in the EFS CSI Driver. +// +union +type AWSEFSVolumeMetrics struct { + // state defines the state of metric collection in the AWS EFS CSI Driver. + // This field is required and must be set to one of the following values: Disabled or RecursiveWalk. + // Disabled means no metrics collection will be performed. This is the default value. + // RecursiveWalk means the AWS EFS CSI Driver will recursively scan volumes to collect metrics. + // This process may result in high CPU and memory usage, depending on the volume size. + // +unionDiscriminator + // +required + State AWSEFSVolumeMetricsState `json:"state"` + + // recursiveWalk provides additional configuration for collecting volume metrics in the AWS EFS CSI Driver + // when the state is set to RecursiveWalk. + // +unionMember + // +optional + RecursiveWalk *AWSEFSVolumeMetricsRecursiveWalkConfig `json:"recursiveWalk,omitempty"` +} + +// AWSEFSVolumeMetricsRecursiveWalkConfig defines options for volume metrics in the EFS CSI Driver. 
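+// For illustration only (not part of the API), a hypothetical ClusterCSIDriver
+// spec stanza enabling recursive EFS metrics collection might look like
+// (the values shown are the documented defaults, not a recommendation):
+// driverConfig:
+//   driverType: AWS
+//   aws:
+//     efsVolumeMetrics:
+//       state: RecursiveWalk
+//       recursiveWalk:
+//         refreshPeriodMinutes: 240
+//         fsRateLimit: 5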
+type AWSEFSVolumeMetricsRecursiveWalkConfig struct {
+ // refreshPeriodMinutes specifies the frequency, in minutes, at which volume metrics are refreshed.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable
+ // default, which is subject to change over time. The current default is 240.
+ // The valid range is from 1 to 43200 minutes (30 days).
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=43200
+ // +optional
+ RefreshPeriodMinutes int32 `json:"refreshPeriodMinutes,omitempty"`
+
+ // fsRateLimit defines the rate limit, in goroutines per file system, for processing volume metrics.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable
+ // default, which is subject to change over time. The current default is 5.
+ // The valid range is from 1 to 100 goroutines.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=100
+ // +optional
+ FSRateLimit int32 `json:"fsRateLimit,omitempty"`
+}
+
+// AzureDiskEncryptionSet defines the configuration for a disk encryption set.
+type AzureDiskEncryptionSet struct {
+ // subscriptionID defines the Azure subscription that contains the disk encryption set.
+ // The value should meet the following conditions:
+ // 1. It should be a 128-bit number.
+ // 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long.
+ // 3. It should be displayed in five groups separated by hyphens (-).
+ // 4. The first group should be 8 characters long.
+ // 5. The second, third, and fourth groups should be 4 characters long.
+ // 6. The fifth group should be 12 characters long.
+ // An example subscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378
+ // +required
+ // +kubebuilder:validation:MaxLength:=36
+ // +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$`
+ SubscriptionID string `json:"subscriptionID"`
+
+ // resourceGroup defines the Azure resource group that contains the disk encryption set.
+ // The value should consist of only alphanumeric characters,
+ // underscores (_), parentheses, hyphens and periods.
+ // The value should not end in a period and be at most 90 characters in
+ // length.
+ // +required
+ // +kubebuilder:validation:MaxLength:=90
+ // +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$`
+ ResourceGroup string `json:"resourceGroup"`
+
+ // name is the name of the disk encryption set that will be set on the default storage class.
+ // The value should consist of only alphanumeric characters,
+ // underscores (_), and hyphens, and be at most 80 characters in length.
+ // +required
+ // +kubebuilder:validation:MaxLength:=80
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ Name string `json:"name"`
+}
+
+// AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.
+type AzureCSIDriverConfigSpec struct {
+ // diskEncryptionSet sets the cluster default storage class to encrypt volumes with a
+ // customer-managed encryption set, rather than the default platform-managed keys.
+ // +optional
+ DiskEncryptionSet *AzureDiskEncryptionSet `json:"diskEncryptionSet,omitempty"`
+}
+
+// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key
+type GCPKMSKeyReference struct {
+ // name is the name of the customer-managed encryption key to be used for disk encryption.
+ // The value should correspond to an existing KMS key and should
+ // consist of only alphanumeric characters, hyphens (-) and underscores (_),
+ // and be at most 63 characters in length.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ // +kubebuilder:validation:MinLength:=1
+ // +kubebuilder:validation:MaxLength:=63
+ // +required
+ Name string `json:"name"`
+
+ // keyRing is the name of the KMS Key Ring which the KMS Key belongs to.
+ // The value should correspond to an existing KMS key ring and should
+ // consist of only alphanumeric characters, hyphens (-) and underscores (_),
+ // and be at most 63 characters in length.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ // +kubebuilder:validation:MinLength:=1
+ // +kubebuilder:validation:MaxLength:=63
+ // +required
+ KeyRing string `json:"keyRing"`
+
+ // projectID is the ID of the Project in which the KMS Key Ring exists.
+ // It must be 6 to 30 lowercase letters, digits, or hyphens.
+ // It must start with a letter. Trailing hyphens are prohibited.
+ // +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$`
+ // +kubebuilder:validation:MinLength:=6
+ // +kubebuilder:validation:MaxLength:=30
+ // +required
+ ProjectID string `json:"projectID"`
+
+ // location is the GCP location in which the Key Ring exists.
+ // The value must match an existing GCP location, or "global".
+ // Defaults to global, if not set.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ // +optional
+ Location string `json:"location,omitempty"`
+}
+
+// GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.
+type GCPCSIDriverConfigSpec struct {
+ // kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied
+ // encryption keys, rather than the default keys managed by GCP.
+ // +optional
+ KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"`
+}
+
+// IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.
+type IBMCloudCSIDriverConfigSpec struct {
+ // encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use
+ // for disk encryption of volumes for the default storage classes.
+ // +required
+ // +kubebuilder:validation:MaxLength:=154
+ // +kubebuilder:validation:MinLength:=144
+ // +kubebuilder:validation:Pattern:=`^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$`
+ EncryptionKeyCRN string `json:"encryptionKeyCRN"`
+}
+
+// VSphereCSIDriverConfigSpec defines properties that
+// can be configured for the vSphere CSI driver.
+type VSphereCSIDriverConfigSpec struct {
+ // topologyCategories indicates the tag categories with which
+ // vCenter resources such as the hostcluster or datacenter were tagged.
+ // If the cluster Infrastructure object has a topology, the values specified in the
+ // Infrastructure object will be used and modifications to topologyCategories
+ // will be rejected.
+ // +listType=atomic
+ // +optional
+ TopologyCategories []string `json:"topologyCategories,omitempty"`
+
+ // globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of
+ // datastores. If omitted, the platform chooses a default, which is subject to change over time; currently that default is 3.
+ // Snapshots cannot be disabled using this parameter.
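+ // For illustration only, a hypothetical driverConfig stanza capping snapshots
+ // per block volume (the value shown is not a default or recommendation):
+ //   driverConfig:
+ //     driverType: vSphere
+ //     vSphere:
+ //       globalMaxSnapshotsPerBlockVolume: 5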
+ // Increasing the number of snapshots above 3 can have a negative impact on performance; for more details see: https://kb.vmware.com/s/article/1025279
+ // Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +openshift:enable:FeatureGate=VSphereDriverConfiguration
+ // +optional
+ GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"`
+
+ // granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It
+ // overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset.
+ // Snapshots for VSAN cannot be disabled using this parameter.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +openshift:enable:FeatureGate=VSphereDriverConfiguration
+ // +optional
+ GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"`
+
+ // granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only.
+ // It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset.
+ // Snapshots for VVOL cannot be disabled using this parameter.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +openshift:enable:FeatureGate=VSphereDriverConfiguration
+ // +optional
+ GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"`
+}
+
+// ClusterCSIDriverStatus is the observed status of the CSI driver operator
+type ClusterCSIDriverStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterCSIDriverList contains a list of ClusterCSIDriver
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterCSIDriverList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []ClusterCSIDriver `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go
new file mode 100644
index 0000000000000..d6d283d3652ab
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go
@@ -0,0 +1,60 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=csisnapshotcontrollers,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/562
+// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=csi-snapshot-controller,operatorOrdering=01
+
+// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.
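+// For illustration only, a minimal manifest might look like (only the name
+// `cluster` is meaningful; the spec value is a hypothetical choice):
+// apiVersion: operator.openshift.io/v1
+// kind: CSISnapshotController
+// metadata:
+//   name: cluster
+// spec:
+//   managementState: Managed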
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CSISnapshotController struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +required
+ Spec CSISnapshotControllerSpec `json:"spec"`
+
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status CSISnapshotControllerStatus `json:"status"`
+}
+
+// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.
+type CSISnapshotControllerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.
+type CSISnapshotControllerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CSISnapshotControllerList contains a list of CSISnapshotControllers.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CSISnapshotControllerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []CSISnapshotController `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go
new file mode 100644
index 0000000000000..258804786844a
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go
@@ -0,0 +1,525 @@
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=dnses,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=dns,operatorOrdering=00
+
+// DNS manages the CoreDNS component to provide a name resolution service
+// for pods and services in the cluster.
+//
+// This supports the DNS-based service discovery specification:
+// https://github.com/kubernetes/dns/blob/master/docs/specification.md
+//
+// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNS struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired behavior of the DNS.
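+ // For illustration only, a hypothetical spec that delegates queries for
+ // "example.com" to a custom resolver:
+ //   servers:
+ //   - name: example-server
+ //     zones:
+ //     - example.com
+ //     forwardPlugin:
+ //       upstreams:
+ //       - 198.51.100.10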
+ Spec DNSSpec `json:"spec,omitempty"`
+ // status is the most recently observed status of the DNS.
+ Status DNSStatus `json:"status,omitempty"`
+}
+
+// DNSSpec is the specification of the desired behavior of the DNS.
+type DNSSpec struct {
+ // servers is a list of DNS resolvers that provide name query delegation for one or
+ // more subdomains outside the scope of the cluster domain. If servers contains
+ // more than one Server, the longest suffix match will be used to determine the Server.
+ //
+ // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com",
+ // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone
+ // "a.foo.com".
+ //
+ // If this field is nil, no servers are created.
+ //
+ // +optional
+ Servers []Server `json:"servers,omitempty"`
+
+ // upstreamResolvers defines a schema for configuring CoreDNS
+ // to proxy DNS messages to upstream resolvers for the case of the
+ // default (".") server.
+ //
+ // If this field is not specified, the upstream used will default to
+ // /etc/resolv.conf, with policy "sequential".
+ //
+ // +optional
+ UpstreamResolvers UpstreamResolvers `json:"upstreamResolvers"`
+
+ // nodePlacement provides explicit control over the scheduling of DNS
+ // pods.
+ //
+ // Generally, it is useful to run a DNS pod on every node so that DNS
+ // queries are always handled by a local DNS pod instead of going over
+ // the network to a DNS pod on another node. However, security policies
+ // may require restricting the placement of DNS pods to specific nodes.
+ // For example, if a security policy prohibits pods on arbitrary nodes
+ // from communicating with the API, a node selector can be specified to
+ // restrict DNS pods to nodes that are permitted to communicate with the
+ // API. Conversely, if running DNS pods on nodes with a particular
+ // taint is desired, a toleration can be specified for that taint.
+ //
+ // If unset, defaults are used. See nodePlacement for more details.
+ //
+ // +optional
+ NodePlacement DNSNodePlacement `json:"nodePlacement,omitempty"`
+
+ // managementState indicates whether the DNS operator should manage cluster
+ // DNS
+ // +optional
+ ManagementState ManagementState `json:"managementState,omitempty"`
+
+ // operatorLogLevel controls the logging level of the DNS Operator.
+ // Valid values are: "Normal", "Debug", "Trace".
+ // Defaults to "Normal".
+ // Setting operatorLogLevel: Trace will produce extremely verbose logs.
+ // +optional
+ // +kubebuilder:default=Normal
+ OperatorLogLevel DNSLogLevel `json:"operatorLogLevel,omitempty"`
+
+ // logLevel describes the desired logging verbosity for CoreDNS.
+ // Any one of the following values may be specified:
+ // * Normal logs errors from upstream resolvers.
+ // * Debug logs errors, NXDOMAIN responses, and NODATA responses.
+ // * Trace logs errors and all responses.
+ // Setting logLevel: Trace will produce extremely verbose logs.
+ // Valid values are: "Normal", "Debug", "Trace".
+ // Defaults to "Normal".
+ // +optional
+ // +kubebuilder:default=Normal
+ LogLevel DNSLogLevel `json:"logLevel,omitempty"`
+
+ // cache describes the caching configuration that applies to all server blocks listed in the Corefile.
+ // This field allows a cluster admin to optionally configure:
+ // * positiveTTL which is a duration for which positive responses should be cached.
+ // * negativeTTL which is a duration for which negative responses should be cached.
+ // If this is not configured, OpenShift will configure positive and negative caching with a default value that is
+ // subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is
+ // 30 seconds or as noted in the respective Corefile for your version of OpenShift.
+ // +optional
+ Cache DNSCache `json:"cache,omitempty"`
+}
+
+// DNSCache defines the fields for configuring DNS caching.
+type DNSCache struct {
+ // positiveTTL is optional and specifies the amount of time that a positive response should be cached.
+ //
+ // If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This
+ // field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix,
+ // e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second.
+ // If the configured value is less than 1s, the default value will be used.
+ // If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted
+ // otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject
+ // to change.
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ PositiveTTL metav1.Duration `json:"positiveTTL,omitempty"`
+
+ // negativeTTL is optional and specifies the amount of time that a negative response should be cached.
+ //
+ // If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This
+ // field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix,
+ // e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second.
+ // If the configured value is less than 1s, the default value will be used.
+ // If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted
+ // otherwise in the respective Corefile for your version of OpenShift. The default value of 30 seconds is subject
+ // to change.
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ NegativeTTL metav1.Duration `json:"negativeTTL,omitempty"`
+}
+
+// +kubebuilder:validation:Enum:=Normal;Debug;Trace
+type DNSLogLevel string
+
+var (
+ // Normal is the default. It provides normal, working log information: everything is fine,
+ // with helpful notices for auditing or common operations. In kube, this is probably glog=2.
+ DNSLogLevelNormal DNSLogLevel = "Normal"
+
+ // Debug is used when something went wrong. Even common operations may be logged, with notices
+ // that are less helpful but greater in quantity. In kube, this is probably glog=4.
+ DNSLogLevelDebug DNSLogLevel = "Debug"
+
+ // Trace is used when something went really badly and even more verbose logs are needed, from logging
+ // every function call as part of a common operation to tracing the execution of a query. In kube, this is probably glog=6.
+ DNSLogLevelTrace DNSLogLevel = "Trace"
+)
+
+// Server defines the schema for a server that runs per instance of CoreDNS.
+type Server struct {
+ // name is required and specifies a unique name for the server. Name must comply
+ // with the Service Name Syntax of rfc6335.
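+ // Example of a conforming name (hypothetical): "example-server".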
+ Name string `json:"name"` + // zones is required and specifies the subdomains that Server is authoritative for. + // Zones must conform to the rfc1123 definition of a subdomain. Specifying the + // cluster domain (i.e., "cluster.local") is invalid. + Zones []string `json:"zones"` + // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages + // to upstream resolvers. + ForwardPlugin ForwardPlugin `json:"forwardPlugin"` +} + +// DNSTransport indicates what type of connection should be used. +// +kubebuilder:validation:Enum=TLS;Cleartext;"" +type DNSTransport string + +const ( + // TLSTransport indicates that TLS should be used for the connection. + TLSTransport DNSTransport = "TLS" + + // CleartextTransport indicates that no encryption should be used for + // the connection. + CleartextTransport DNSTransport = "Cleartext" +) + +// DNSTransportConfig groups related configuration parameters used for configuring +// forwarding to upstream resolvers that support DNS-over-TLS. +// +union +type DNSTransportConfig struct { + // transport allows cluster administrators to opt-in to using a DNS-over-TLS + // connection between cluster DNS and an upstream resolver(s). Configuring + // TLS as the transport at this level without configuring a CABundle will + // result in the system certificates being used to verify the serving + // certificate of the upstream resolver(s). + // + // Possible values: + // "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject + // to change over time. The current default is "Cleartext". + // "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality + // as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, + // or wants to switch from "TLS" to "Cleartext" explicitly. + // "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, + // you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default + // per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1. + // + // +optional + // +unionDiscriminator + Transport DNSTransport `json:"transport,omitempty"` + + // tls contains the additional configuration options to use when Transport is set to "TLS". + TLS *DNSOverTLSConfig `json:"tls,omitempty"` +} + +// DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured. +type DNSOverTLSConfig struct { + // serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is + // set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the + // TLS certificate installed in the upstream resolver(s). + // + // + --- + // + Inspired by the DNS1123 patterns in Kubernetes: https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218 + // +required + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + ServerName string `json:"serverName"` + + // caBundle references a ConfigMap that must contain either a single + // CA Certificate or a CA Bundle. 
This allows cluster administrators to provide their + // own CA or CA bundle for validating the certificate of upstream resolvers. + // + // 1. The configmap must contain a `ca-bundle.crt` key. + // 2. The value must be a PEM encoded CA certificate or CA bundle. + // 3. The administrator must create this configmap in the openshift-config namespace. + // 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. + // + // +optional + CABundle v1.ConfigMapNameReference `json:"caBundle,omitempty"` +} + +// ForwardingPolicy is the policy to use when forwarding DNS requests. +// +kubebuilder:validation:Enum=Random;RoundRobin;Sequential +type ForwardingPolicy string + +const ( + // RandomForwardingPolicy picks a random upstream server for each query. + RandomForwardingPolicy ForwardingPolicy = "Random" + + // RoundRobinForwardingPolicy picks upstream servers in a round-robin order, moving to the next server for each new query. + RoundRobinForwardingPolicy ForwardingPolicy = "RoundRobin" + + // SequentialForwardingPolicy tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + SequentialForwardingPolicy ForwardingPolicy = "Sequential" +) + +// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin. +type ForwardPlugin struct { + // upstreams is a list of resolvers to forward name queries for subdomains of Zones. + // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream + // returns an error during the exchange, another resolver is tried from Upstreams. The + // Upstreams are selected in the order specified in Policy. Each upstream is represented + // by an IP address or IP:port if the upstream listens on a port other than 53. + // + // A maximum of 15 upstreams is allowed per ForwardPlugin. + // + // +kubebuilder:validation:MaxItems=15 + Upstreams []string `json:"upstreams"` + + // policy is used to determine the order in which upstream servers are selected for querying. + // Any one of the following values may be specified: + // + // * "Random" picks a random upstream server for each query. + // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query. + // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + // + // The default value is "Random" + // + // +optional + // +kubebuilder:default:="Random" + Policy ForwardingPolicy `json:"policy,omitempty"` + + // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use + // when forwarding DNS requests to an upstream resolver. + // + // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS + // requests to an upstream resolver. + // + // +optional + TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"` + + // protocolStrategy specifies the protocol to use for upstream DNS + // requests. + // Valid values for protocolStrategy are "TCP" and omitted. + // When omitted, this means no opinion and the platform is left to choose + // a reasonable default, which is subject to change over time. + // The current default is to use the protocol of the original client request. + // "TCP" specifies that the platform should use TCP for all upstream DNS requests, + // even if the client request uses UDP. 
+ // "TCP" is useful for UDP-specific issues such as those created by + // non-compliant upstream resolvers, but may consume more bandwidth or + // increase DNS response time. Note that protocolStrategy only affects + // the protocol of DNS requests that CoreDNS makes to upstream resolvers. + // It does not affect the protocol of DNS requests between clients and + // CoreDNS. + // + // +optional + ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"` +} + +// UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the +// specific case of the default (".") server. +// It defers from ForwardPlugin in the default values it accepts: +// * At least one upstream should be specified. +// * the default policy is Sequential +type UpstreamResolvers struct { + // upstreams is a list of resolvers to forward name queries for the "." domain. + // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream + // returns an error during the exchange, another resolver is tried from Upstreams. The + // Upstreams are selected in the order specified in Policy. + // + // A maximum of 15 upstreams is allowed per ForwardPlugin. + // If no Upstreams are specified, /etc/resolv.conf is used by default + // + // +optional + // +kubebuilder:validation:MaxItems=15 + // +kubebuilder:default={{"type":"SystemResolvConf"}} + Upstreams []Upstream `json:"upstreams"` + + // policy is used to determine the order in which upstream servers are selected for querying. + // Any one of the following values may be specified: + // + // * "Random" picks a random upstream server for each query. + // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query. + // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + // + // The default value is "Sequential" + // + // +optional + // +kubebuilder:default="Sequential" + Policy ForwardingPolicy `json:"policy,omitempty"` + + // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use + // when forwarding DNS requests to an upstream resolver. + // + // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS + // requests to an upstream resolver. + // + // +optional + TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"` + + // protocolStrategy specifies the protocol to use for upstream DNS + // requests. + // Valid values for protocolStrategy are "TCP" and omitted. + // When omitted, this means no opinion and the platform is left to choose + // a reasonable default, which is subject to change over time. + // The current default is to use the protocol of the original client request. + // "TCP" specifies that the platform should use TCP for all upstream DNS requests, + // even if the client request uses UDP. + // "TCP" is useful for UDP-specific issues such as those created by + // non-compliant upstream resolvers, but may consume more bandwidth or + // increase DNS response time. Note that protocolStrategy only affects + // the protocol of DNS requests that CoreDNS makes to upstream resolvers. + // It does not affect the protocol of DNS requests between clients and + // CoreDNS. + // + // +optional + ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"` +} + +// Upstream can either be of type SystemResolvConf, or of type Network. 
+//
+// - For an Upstream of type SystemResolvConf, no further fields are necessary:
+// The upstream will be configured to use /etc/resolv.conf.
+// - For an Upstream of type Network, a NetworkResolver field needs to be defined
+// with an IP address or IP:port if the upstream listens on a port other than 53.
+type Upstream struct {
+
+ // type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf.
+ // Type accepts two possible values: SystemResolvConf or Network.
+ //
+ // * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:
+ // /etc/resolv.conf will be used
+ // * When Network is used, the Upstream structure must contain at least an Address
+ //
+ // +required
+ Type UpstreamType `json:"type"`
+
+ // address must be defined when Type is set to Network. It will be ignored otherwise.
+ // It must be a valid IPv4 or IPv6 address.
+ //
+ // +optional
+ Address string `json:"address,omitempty"`
+
+ // port may be defined when Type is set to Network. It will be ignored otherwise.
+ // Port must be between 1 and 65535.
+ //
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:default=53
+ Port uint32 `json:"port,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=SystemResolvConf;Network;""
+type UpstreamType string
+
+const (
+ SystemResolveConfType UpstreamType = "SystemResolvConf"
+ NetworkResolverType UpstreamType = "Network"
+)
+
+// ProtocolStrategy is a preference for the protocol to use for DNS queries.
+// + ---
+// + When consumers observe an unknown value, they should use the default strategy.
+// +kubebuilder:validation:Enum:=TCP;""
+type ProtocolStrategy string
+
+var (
+ // ProtocolStrategyDefault specifies no opinion for DNS protocol.
+ // If empty, the default behavior of CoreDNS is used. Currently, this means that CoreDNS uses the protocol of the
+ // originating client request as the upstream protocol.
+ // Note that the default behavior of CoreDNS is subject to change.
+ ProtocolStrategyDefault ProtocolStrategy = ""
+
+ // ProtocolStrategyTCP instructs CoreDNS to always use TCP, regardless of the originating client's request protocol.
+ ProtocolStrategyTCP ProtocolStrategy = "TCP"
+)
+
+// DNSNodePlacement describes the node scheduling configuration for DNS pods.
+type DNSNodePlacement struct {
+ // nodeSelector is the node selector applied to DNS pods.
+ //
+ // If empty, the default is used, which is currently the following:
+ //
+ // kubernetes.io/os: linux
+ //
+ // This default is subject to change.
+ //
+ // If set, the specified selector is used and replaces the default.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // tolerations is a list of tolerations applied to DNS pods.
+ //
+ // If empty, the DNS operator sets a toleration for the
+ // "node-role.kubernetes.io/master" taint. This default is subject to
+ // change. Specifying tolerations without including a toleration for
+ // the "node-role.kubernetes.io/master" taint may be risky as it could
+ // lead to an outage if all worker nodes become unavailable.
+ //
+ // Note that the daemon controller adds some tolerations as well. See
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ //
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+const (
+ // Available indicates the DNS controller daemonset is available.
+ DNSAvailable = "Available"
+)
+
+// DNSStatus defines the observed status of the DNS.
+type DNSStatus struct {
+ // clusterIP is the service IP through which this DNS is made available.
+ //
+ // In the case of the default DNS, this will be a well known IP that is used
+ // as the default nameserver for pods that are using the default ClusterFirst DNS policy.
+ //
+ // In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list
+ // or used explicitly when performing name resolution from within the cluster.
+ // Example: dig foo.com @<service IP>
+ //
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ //
+ // +required
+ ClusterIP string `json:"clusterIP"`
+
+ // clusterDomain is the local cluster DNS domain suffix for DNS services.
+ // This will be a subdomain as defined in RFC 1034,
+ // section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5
+ // Example: "cluster.local"
+ //
+ // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service
+ //
+ // +required
+ ClusterDomain string `json:"clusterDomain"`
+
+ // conditions provide information about the state of the DNS on the cluster.
+ //
+ // These are the supported DNS conditions:
+ //
+ // * Available
+ // - True if the following conditions are met:
+ // * DNS controller daemonset is available.
+ // - False if any of those conditions are unsatisfied.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DNSList contains a list of DNS
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []DNS `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
new file mode 100644
index 0000000000000..375ec5fb7fd50
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
@@ -0,0 +1,96 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=etcds,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/752
+// +openshift:file-pattern=cvoRunLevel=0000_12,operatorName=etcd,operatorOrdering=01
+
+// Etcd provides information to configure an operator to manage etcd.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Etcd struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +required
+ Spec EtcdSpec `json:"spec"`
+ // +optional
+ Status EtcdStatus `json:"status"`
+}
+
+type EtcdSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+ // HardwareSpeed allows the user to change the etcd tuning profile which configures
+ // the latency parameters for heartbeat interval and leader election timeouts,
+ // allowing the cluster to tolerate longer round-trip times between etcd members.
+ // Valid values are "", "Standard" and "Slower".
+ // "" means no opinion and the platform is left to choose a reasonable default
+ // which is subject to change without notice.
+ // +openshift:enable:FeatureGate=HardwareSpeed
+ // +optional
+ HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+
+ // backendQuotaGiB sets the etcd backend storage size limit in gibibytes.
+ // The value should be an integer not less than 8 and not more than 32.
+ // When not specified, the default value is 8.
+ // +kubebuilder:default:=8
+ // +kubebuilder:validation:Minimum=8
+ // +kubebuilder:validation:Maximum=32
+ // +kubebuilder:validation:XValidation:rule="self>=oldSelf",message="etcd backendQuotaGiB may not be decreased"
+ // +openshift:enable:FeatureGate=EtcdBackendQuota
+ // +default=8
+ // +optional
+ BackendQuotaGiB int32 `json:"backendQuotaGiB,omitempty"`
+}
+
+type EtcdStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+ HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+}
+
+const (
+ // StandardHardwareSpeed provides the normal tolerances for hardware speed and latency.
+ // Currently sets (values subject to change at any time):
+ // ETCD_HEARTBEAT_INTERVAL: 100ms
+ // ETCD_LEADER_ELECTION_TIMEOUT: 1000ms
+ StandardHardwareSpeed ControlPlaneHardwareSpeed = "Standard"
+ // SlowerHardwareSpeed provides more tolerance for slower hardware and/or higher latency networks.
+ // Sets (values subject to change):
+ // ETCD_HEARTBEAT_INTERVAL: 5x Standard
+ // ETCD_LEADER_ELECTION_TIMEOUT: 2.5x Standard
+ SlowerHardwareSpeed ControlPlaneHardwareSpeed = "Slower"
+)
+
+// ControlPlaneHardwareSpeed declares valid hardware speed tolerance levels
+// +enum
+// +kubebuilder:validation:Enum:="";Standard;Slower
+type ControlPlaneHardwareSpeed string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EtcdList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type EtcdList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []Etcd `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go new file mode 100644 index 0000000000000..6304c95636130 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -0,0 +1,2035 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + corev1 "k8s.io/api/core/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector +// +kubebuilder:resource:path=ingresscontrollers,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/616 +// +openshift:capability=Ingress +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=ingress,operatorOrdering=00 + +// IngressController describes a managed ingress controller for the cluster. The +// controller can service OpenShift Route and Kubernetes Ingress resources. +// +// When an IngressController is created, a new ingress controller deployment is +// created to allow external traffic to reach the services that expose Ingress +// or Route resources. Updating this resource may lead to disruption for public +// facing network connections as a new ingress controller revision may be rolled +// out. +// +// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers +// +// Whenever possible, sensible defaults for the platform are used. See each +// field for more details. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type IngressController struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the IngressController. + Spec IngressControllerSpec `json:"spec,omitempty"` + // status is the most recently observed status of the IngressController. + Status IngressControllerStatus `json:"status,omitempty"` +} + +// IngressControllerSpec is the specification of the desired behavior of the +// IngressController. +type IngressControllerSpec struct { + // domain is a DNS name serviced by the ingress controller and is used to + // configure multiple features: + // + // * For the LoadBalancerService endpoint publishing strategy, domain is + // used to configure DNS records. See endpointPublishingStrategy. + // + // * When using a generated default certificate, the certificate will be valid + // for domain and its subdomains. See defaultCertificate. + // + // * The value is published to individual Route statuses so that end-users + // know where to target external DNS records. + // + // domain must be unique among all IngressControllers, and cannot be + // updated. + // + // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. 
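+ // For illustration only, a hypothetical sharded controller might set:
+ //   domain: apps-internal.example.com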
+ //
+ // +optional
+ Domain string `json:"domain,omitempty"`
+
+ // httpErrorCodePages specifies a configmap with custom error pages.
+ // The administrator must create this configmap in the openshift-config namespace.
+ // This configmap should have keys in the format "error-page-<error code>.http",
+ // where <error code> is an HTTP error code.
+ // For example, "error-page-503.http" defines an error page for HTTP 503 responses.
+ // Currently only error pages for 503 and 404 responses can be customized.
+ // Each value in the configmap should be the full response, including HTTP headers.
+ // E.g. https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http
+ // If this field is empty, the ingress controller uses the default error pages.
+ HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"`
+
+ // replicas is the desired number of ingress controller replicas. If unset,
+ // the default depends on the value of the defaultPlacement field in the
+ // cluster config.openshift.io/v1/ingresses status.
+ //
+ // The value of replicas is set based on the value of a chosen field in the
+ // Infrastructure CR. If defaultPlacement is set to ControlPlane, the
+ // chosen field will be controlPlaneTopology. If it is set to Workers, the
+ // chosen field will be infrastructureTopology. Replicas will then be set to 1
+ // or 2 based on whether the chosen field's value is SingleReplica or
+ // HighlyAvailable, respectively.
+ //
+ // These defaults are subject to change.
+ //
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty"`
+
+ // endpointPublishingStrategy is used to publish the ingress controller
+ // endpoints to other networks, enable load balancer integrations, etc.
+ //
+ // If unset, the default is based on
+ // infrastructure.config.openshift.io/cluster .status.platform:
+ //
+ // AWS: LoadBalancerService (with External scope)
+ // Azure: LoadBalancerService (with External scope)
+ // GCP: LoadBalancerService (with External scope)
+ // IBMCloud: LoadBalancerService (with External scope)
+ // AlibabaCloud: LoadBalancerService (with External scope)
+ // Libvirt: HostNetwork
+ //
+ // Any other platform types (including None) default to HostNetwork.
+ //
+ // endpointPublishingStrategy cannot be updated.
+ //
+ // +optional
+ EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
+
+ // defaultCertificate is a reference to a secret containing the default
+ // certificate served by the ingress controller. When Routes don't specify
+ // their own certificate, defaultCertificate is used.
+ //
+ // The secret must contain the following keys and data:
+ //
+ // tls.crt: certificate file contents
+ // tls.key: key file contents
+ //
+ // If unset, a wildcard certificate is automatically generated and used. The
+ // certificate is valid for the ingress controller domain (and subdomains) and
+ // the generated certificate's CA will be automatically integrated with the
+ // cluster's trust store.
+ //
+ // If a wildcard certificate is used and shared by multiple
+ // HTTP/2 enabled routes (which implies ALPN) then clients
+ // (i.e., notably browsers) are at liberty to reuse open
+ // connections. This means a client can reuse a connection to
+ // another route and that is likely to fail. This behaviour is
+ // generally known as connection coalescing.
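+ //
+ // For illustration only, referencing a hypothetical secret:
+ //   defaultCertificate:
+ //     name: custom-ingress-cert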
+ // + // The in-use certificate (whether generated or user-specified) will be + // automatically integrated with OpenShift's built-in OAuth server. + // + // +optional + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + + // namespaceSelector is used to filter the set of namespaces serviced by the + // ingress controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is used to filter the set of Routes serviced by the ingress + // controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` + + // nodePlacement enables explicit control over the scheduling of the ingress + // controller. + // + // If unset, defaults are used. See NodePlacement for more details. + // + // +optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + + // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. + // + // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + // + // Note that when using the Old, Intermediate, and Modern profile types, the effective + // profile configuration is subject to change between releases. For example, given + // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade + // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress + // controller, resulting in a rollout. + // + // +optional + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + + // clientTLS specifies settings for requesting and verifying client + // certificates, which can be used to enable mutual TLS for + // edge-terminated and reencrypt routes. + // + // +optional + ClientTLS ClientTLS `json:"clientTLS"` + + // routeAdmission defines a policy for handling new route claims (for example, + // to allow or deny claims across namespaces). + // + // If empty, defaults will be applied. See specific routeAdmission fields + // for details about their defaults. + // + // +optional + RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"` + + // logging defines parameters for what should be logged where. If this + // field is empty, operational logs are enabled but access logs are + // disabled. + // + // +optional + Logging *IngressControllerLogging `json:"logging,omitempty"` + + // httpHeaders defines policy for HTTP headers. + // + // If this field is empty, the default values are used. + // + // +optional + HTTPHeaders *IngressControllerHTTPHeaders `json:"httpHeaders,omitempty"` + + // httpEmptyRequestsPolicy describes how HTTP connections should be + // handled if the connection times out before a request is received. + // Allowed values for this field are "Respond" and "Ignore". If the + // field is set to "Respond", the ingress controller sends an HTTP 400 + // or 408 response, logs the connection (if access logging is enabled), + // and counts the connection in the appropriate metrics. If the field + // is set to "Ignore", the ingress controller closes the connection + // without sending a response, logging the connection, or incrementing + // metrics. The default value is "Respond". 
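To make the sharding pattern mentioned for namespaceSelector and routeSelector concrete, here is a hedged Go sketch that builds an IngressController servicing only Routes labeled type=internal; the name, namespace, domain, and label are illustrative values:

```go
package ingressexample

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// internalShard builds an IngressController that only services Routes
// carrying the label type=internal, via the routeSelector field above.
func internalShard() *operatorv1.IngressController {
	return &operatorv1.IngressController{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "internal",
			Namespace: "openshift-ingress-operator",
		},
		Spec: operatorv1.IngressControllerSpec{
			Domain: "internal.apps.example.com",
			RouteSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"type": "internal"},
			},
		},
	}
}
```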
+	//
+	// Typically, these connections come from load balancers' health probes
+	// or Web browsers' speculative connections ("preconnect") and can be
+	// safely ignored. However, these requests may also be caused by
+	// network errors, and so setting this field to "Ignore" may impede
+	// detection and diagnosis of problems. In addition, these requests may
+	// be caused by port scans, in which case logging empty requests may aid
+	// in detecting intrusion attempts.
+	//
+	// +optional
+	// +kubebuilder:default:="Respond"
+	HTTPEmptyRequestsPolicy HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"`
+
+	// tuningOptions defines parameters for adjusting the performance of
+	// ingress controller pods. All fields are optional and will use their
+	// respective defaults if not set. See specific tuningOptions fields for
+	// more details.
+	//
+	// Setting fields within tuningOptions is generally not recommended. The
+	// default values are suitable for most configurations.
+	//
+	// +optional
+	TuningOptions IngressControllerTuningOptions `json:"tuningOptions,omitempty"`
+
+	// unsupportedConfigOverrides allows specifying unsupported
+	// configuration options. Its use is unsupported.
+	//
+	// +optional
+	// +nullable
+	// +kubebuilder:pruning:PreserveUnknownFields
+	UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"`
+
+	// httpCompression defines a policy for HTTP traffic compression.
+	// By default, there is no HTTP compression.
+	//
+	// +optional
+	HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"`
+}
+
+// HTTPCompressionPolicy turns on compression for the specified MIME types.
+//
+// This field is optional, and its absence implies that compression should not be enabled
+// globally in HAProxy.
+//
+// If httpCompressionPolicy exists, compression should be enabled only for the specified
+// MIME types.
+type HTTPCompressionPolicy struct {
+	// mimeTypes is a list of MIME types that should have compression applied.
+	// This list can be empty, in which case the ingress controller does not apply compression.
+	//
+	// Note: Not all MIME types benefit from compression, but HAProxy will still use resources
+	// to try to compress if instructed to. Generally speaking, text (html, css, js, etc.)
+	// formats benefit from compression, but formats that are already compressed (image,
+	// audio, video, etc.) benefit little in exchange for the time and CPU spent on compressing
+	// again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2
+	//
+	// +listType=set
+	MimeTypes []CompressionMIMEType `json:"mimeTypes,omitempty"`
+}
+
+// CompressionMIMEType defines the format of a single MIME type.
+// E.g. "text/css; charset=utf-8", "text/html", "text/*", "image/svg+xml",
+// "application/octet-stream", "X-custom/customsub", etc.
+//
+// The format should follow the Content-Type definition in RFC 1341:
+// Content-Type := type "/" subtype *[";" parameter]
+// - The type in Content-Type can be one of:
+// application, audio, image, message, multipart, text, video, or a custom
+// type preceded by "X-" and followed by a token as defined below.
+// - The token is a string of at least one character, and not containing white
+// space, control characters, or any of the characters in the tspecials set.
+// - The tspecials set contains the characters ()<>@,;:\"/[]?.=
+// - The subtype in Content-Type is also a token.
+// - The optional parameter/s following the subtype are defined as: +// token "=" (token / quoted-string) +// - The quoted-string, as defined in RFC 822, is surrounded by double quotes +// and can contain white space plus any character EXCEPT \, ", and CR. +// It can also contain any single ASCII character as long as it is escaped by \. +// +// +kubebuilder:validation:Pattern=`^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$` +type CompressionMIMEType string + +// NodePlacement describes node scheduling configuration for an ingress +// controller. +type NodePlacement struct { + // nodeSelector is the node selector applied to ingress controller + // deployments. + // + // If set, the specified selector is used and replaces the default. + // + // If unset, the default depends on the value of the defaultPlacement + // field in the cluster config.openshift.io/v1/ingresses status. + // + // When defaultPlacement is Workers, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/worker: '' + // + // When defaultPlacement is ControlPlane, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/master: '' + // + // These defaults are subject to change. + // + // Note that using nodeSelector.matchExpressions is not supported. Only + // nodeSelector.matchLabels may be used. This is a limitation of the + // Kubernetes API: the pod spec does not allow complex expressions for + // node selectors. + // + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to ingress controller + // deployments. + // + // The default is an empty list. + // + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + // + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// EndpointPublishingStrategyType is a way to publish ingress controller endpoints. +// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService +type EndpointPublishingStrategyType string + +const ( + // LoadBalancerService publishes the ingress controller using a Kubernetes + // LoadBalancer Service. + LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService" + + // HostNetwork publishes the ingress controller on node ports where the + // ingress controller is deployed. + HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork" + + // Private does not publish the ingress controller. + PrivateStrategyType EndpointPublishingStrategyType = "Private" + + // NodePortService publishes the ingress controller using a Kubernetes NodePort Service. + NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService" +) + +// LoadBalancerScope is the scope at which a load balancer is exposed. +// +kubebuilder:validation:Enum=Internal;External +type LoadBalancerScope string + +var ( + // InternalLoadBalancer is a load balancer that is exposed only on the + // cluster's private network. + InternalLoadBalancer LoadBalancerScope = "Internal" + + // ExternalLoadBalancer is a load balancer that is exposed on the + // cluster's public network (which is typically on the Internet). 
+	ExternalLoadBalancer LoadBalancerScope = "External"
+)
+
+// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8"
+// or "fd00::/8").
+// +kubebuilder:validation:Pattern=`(^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$)`
+// + ---
+// + The regex for the IPv4 CIDR range was taken from other CIDR fields in the OpenShift API
+// + and the one for the IPv6 CIDR range was taken from
+// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/
+// + The resulting regex is an OR of both regexes.
+type CIDR string
+
+// LoadBalancerStrategy holds parameters for a load balancer.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule="!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.aws) || !has(self.providerParameters.aws.networkLoadBalancer) || !has(self.providerParameters.aws.networkLoadBalancer.eipAllocations)",message="eipAllocations are forbidden when the scope is Internal."
+// +kubebuilder:validation:XValidation:rule=`!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.openstack) || !has(self.providerParameters.openstack.floatingIP) || self.providerParameters.openstack.floatingIP == ""`,message="cannot specify a floating ip when scope is internal"
+type LoadBalancerStrategy struct {
+	// scope indicates the scope at which the load balancer is exposed.
+	// Possible values are "External" and "Internal".
+	//
+	// +required
+	Scope LoadBalancerScope `json:"scope"`
+
+	// allowedSourceRanges specifies an allowlist of IP address ranges to which
+	// access to the load balancer should be restricted. Each range must be
+	// specified using CIDR notation (e.g. "10.0.0.0/8" or "fd00::/8"). If no range is
+	// specified, "0.0.0.0/0" for IPv4 and "::/0" for IPv6 are used by default,
+	// which allows all source addresses.
+	//
+	// To facilitate migration from earlier versions of OpenShift that did
+	// not have the allowedSourceRanges field, you may set the
+	// service.beta.kubernetes.io/load-balancer-source-ranges annotation on
+	// the "router-<ingresscontroller name>" service in the
+	// "openshift-ingress" namespace, and this annotation will take
+	// effect if allowedSourceRanges is empty on OpenShift 4.12.
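A minimal Go sketch of a LoadBalancerStrategy that stays on the cluster's private network and allowlists two source ranges (all values illustrative, not defaults):

```go
package ingressexample

import operatorv1 "github.com/openshift/api/operator/v1"

// restrictedLB keeps the load balancer internal and restricts access to
// two example ranges, one IPv4 and one IPv6, in CIDR notation.
var restrictedLB = operatorv1.LoadBalancerStrategy{
	Scope: operatorv1.InternalLoadBalancer,
	AllowedSourceRanges: []operatorv1.CIDR{
		"10.0.0.0/8", // IPv4 range
		"fd00::/8",   // IPv6 range
	},
	DNSManagementPolicy: operatorv1.ManagedLoadBalancerDNS,
}
```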
+ // + // +nullable + // +optional + // +listType=atomic + AllowedSourceRanges []CIDR `json:"allowedSourceRanges,omitempty"` + + // providerParameters holds desired load balancer information specific to + // the underlying infrastructure provider. + // + // If empty, defaults will be applied. See specific providerParameters + // fields for details about their defaults. + // + // +optional + ProviderParameters *ProviderLoadBalancerParameters `json:"providerParameters,omitempty"` + + // dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record + // associated with the load balancer service will be managed by + // the ingress operator. It defaults to Managed. + // Valid values are: Managed and Unmanaged. + // + // +kubebuilder:default:="Managed" + // +required + // +default="Managed" + DNSManagementPolicy LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` +} + +// LoadBalancerDNSManagementPolicy is a policy for configuring how +// ingresscontrollers manage DNS. +// +// +kubebuilder:validation:Enum=Managed;Unmanaged +type LoadBalancerDNSManagementPolicy string + +const ( + // ManagedLoadBalancerDNS specifies that the operator manages + // a wildcard DNS record for the ingresscontroller. + ManagedLoadBalancerDNS LoadBalancerDNSManagementPolicy = "Managed" + // UnmanagedLoadBalancerDNS specifies that the operator does not manage + // any wildcard DNS record for the ingresscontroller. + UnmanagedLoadBalancerDNS LoadBalancerDNSManagementPolicy = "Unmanaged" +) + +// ProviderLoadBalancerParameters holds desired load balancer information +// specific to the underlying infrastructure provider. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'OpenStack' ? true : !has(self.openstack)",message="openstack is not permitted when type is not OpenStack" +// +union +type ProviderLoadBalancerParameters struct { + // type is the underlying infrastructure provider for the load balancer. + // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", + // "OpenStack", and "VSphere". + // + // +unionDiscriminator + // +required + Type LoadBalancerProviderType `json:"type"` + + // aws provides configuration settings that are specific to AWS + // load balancers. + // + // If empty, defaults will be applied. See specific aws fields for + // details about their defaults. + // + // +optional + AWS *AWSLoadBalancerParameters `json:"aws,omitempty"` + + // gcp provides configuration settings that are specific to GCP + // load balancers. + // + // If empty, defaults will be applied. See specific gcp fields for + // details about their defaults. + // + // +optional + GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"` + + // ibm provides configuration settings that are specific to IBM Cloud + // load balancers. + // + // If empty, defaults will be applied. See specific ibm fields for + // details about their defaults. + // + // +optional + IBM *IBMLoadBalancerParameters `json:"ibm,omitempty"` + + // openstack provides configuration settings that are specific to OpenStack + // load balancers. + // + // If empty, defaults will be applied. See specific openstack fields for + // details about their defaults. + // + // +optional + OpenStack *OpenStackLoadBalancerParameters `json:"openstack,omitempty"` +} + +// LoadBalancerProviderType is the underlying infrastructure provider for the +// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", +// "OpenStack", and "VSphere". 
+// +// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;Nutanix;OpenStack;VSphere;IBM +type LoadBalancerProviderType string + +const ( + AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" + AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" + GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" + OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" + VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" + IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" + BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" + AlibabaCloudLoadBalancerProvider LoadBalancerProviderType = "AlibabaCloud" + NutanixLoadBalancerProvider LoadBalancerProviderType = "Nutanix" +) + +// AWSLoadBalancerParameters provides configuration settings that are +// specific to AWS load balancers. +// +union +type AWSLoadBalancerParameters struct { + // type is the type of AWS load balancer to instantiate for an ingresscontroller. + // + // Valid values are: + // + // * "Classic": A Classic Load Balancer that makes routing decisions at either + // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See + // the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + // + // * "NLB": A Network Load Balancer that makes routing decisions at the + // transport layer (TCP/SSL). See the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + // + // +unionDiscriminator + // +required + Type AWSLoadBalancerType `json:"type"` + + // classicLoadBalancerParameters holds configuration parameters for an AWS + // classic load balancer. Present only if type is Classic. + // + // +optional + ClassicLoadBalancerParameters *AWSClassicLoadBalancerParameters `json:"classicLoadBalancer,omitempty"` + + // networkLoadBalancerParameters holds configuration parameters for an AWS + // network load balancer. Present only if type is NLB. + // + // +optional + NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParameters `json:"networkLoadBalancer,omitempty"` +} + +// AWSLoadBalancerType is the type of AWS load balancer to instantiate. +// +kubebuilder:validation:Enum=Classic;NLB +type AWSLoadBalancerType string + +const ( + AWSClassicLoadBalancer AWSLoadBalancerType = "Classic" + AWSNetworkLoadBalancer AWSLoadBalancerType = "NLB" +) + +// AWSSubnets contains a list of references to AWS subnets by +// ID or name. +// +kubebuilder:validation:XValidation:rule=`has(self.ids) && has(self.names) ? size(self.ids + self.names) <= 10 : true`,message="the total number of subnets cannot exceed 10" +// +kubebuilder:validation:XValidation:rule=`has(self.ids) && self.ids.size() > 0 || has(self.names) && self.names.size() > 0`,message="must specify at least 1 subnet name or id" +type AWSSubnets struct { + // ids specifies a list of AWS subnets by subnet ID. + // Subnet IDs must start with "subnet-", consist only + // of alphanumeric characters, must be exactly 24 + // characters long, must be unique, and the total + // number of subnets specified by ids and names + // must not exceed 10. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="subnet ids cannot contain duplicates" + // + Note: Though it may seem redundant, MaxItems is necessary to prevent exceeding of the cost budget for the validation rules. 
+	// +kubebuilder:validation:MaxItems=10
+	IDs []AWSSubnetID `json:"ids,omitempty"`
+
+	// names specifies a list of AWS subnets by subnet name.
+	// Subnet names must not start with "subnet-", must not
+	// include commas, must be under 256 characters in length,
+	// must be unique, and the total number of subnets
+	// specified by ids and names must not exceed 10.
+	//
+	// +optional
+	// +listType=atomic
+	// +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="subnet names cannot contain duplicates"
+	// + Note: Though it may seem redundant, MaxItems is necessary to prevent exceeding of the cost budget for the validation rules.
+	// +kubebuilder:validation:MaxItems=10
+	Names []AWSSubnetName `json:"names,omitempty"`
+}
+
+// AWSSubnetID is a reference to an AWS subnet ID.
+// +kubebuilder:validation:MinLength=24
+// +kubebuilder:validation:MaxLength=24
+// +kubebuilder:validation:Pattern=`^subnet-[0-9A-Za-z]+$`
+type AWSSubnetID string
+
+// AWSSubnetName is a reference to an AWS subnet name.
+// +kubebuilder:validation:MinLength=1
+// +kubebuilder:validation:MaxLength=256
+// +kubebuilder:validation:XValidation:rule=`!self.contains(',')`,message="subnet name cannot contain a comma"
+// +kubebuilder:validation:XValidation:rule=`!self.startsWith('subnet-')`,message="subnet name cannot start with 'subnet-'"
+type AWSSubnetName string
+
+// GCPLoadBalancerParameters provides configuration settings that are
+// specific to GCP load balancers.
+type GCPLoadBalancerParameters struct {
+	// clientAccess describes how client access is restricted for internal
+	// load balancers.
+	//
+	// Valid values are:
+	// * "Global": Specifying an internal load balancer with Global client access
+	// allows clients from any region within the VPC to communicate with the load
+	// balancer.
+	//
+	// https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access
+	//
+	// * "Local": Specifying an internal load balancer with Local client access
+	// means only clients within the same region (and VPC) as the GCP load balancer
+	// can communicate with the load balancer. Note that this is the default behavior.
+	//
+	// https://cloud.google.com/load-balancing/docs/internal#client_access
+	//
+	// +optional
+	ClientAccess GCPClientAccess `json:"clientAccess,omitempty"`
+}
+
+// GCPClientAccess describes how client access is restricted for internal
+// load balancers.
+// +kubebuilder:validation:Enum=Global;Local
+type GCPClientAccess string
+
+const (
+	GCPGlobalAccess GCPClientAccess = "Global"
+	GCPLocalAccess GCPClientAccess = "Local"
+)
+
+// IBMLoadBalancerParameters provides configuration settings that are
+// specific to IBM Cloud load balancers.
+type IBMLoadBalancerParameters struct {
+	// protocol specifies whether the load balancer uses PROXY protocol to forward connections to
+	// the IngressController. See "service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features:
+	// "proxy-protocol"" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas
+	//
+	// PROXY protocol can be used with load balancers that support it to
+	// communicate the source addresses of client connections when
+	// forwarding those connections to the IngressController. Using PROXY
+	// protocol enables the IngressController to report those source
+	// addresses instead of reporting the load balancer's address in HTTP
+	// headers and logs.
Note that enabling PROXY protocol on the
+	// IngressController will cause connections to fail if you are not using
+	// a load balancer that uses PROXY protocol to forward connections to
+	// the IngressController. See
+	// http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+	// information about PROXY protocol.
+	//
+	// Valid values for protocol are TCP, PROXY and omitted.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default is TCP, without the proxy protocol enabled.
+	//
+	// +optional
+	Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+}
+
+// OpenStackLoadBalancerParameters provides configuration settings that are
+// specific to OpenStack load balancers.
+type OpenStackLoadBalancerParameters struct {
+	// loadBalancerIP is tombstoned since the field was replaced by floatingIP.
+	// LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
+
+	// floatingIP specifies the IP address that the load balancer will use.
+	// When not specified, an IP address will be assigned randomly by the OpenStack cloud provider.
+	// When specified, the floating IP has to be pre-created. If the
+	// specified value is not a floating IP or is already claimed, the
+	// OpenStack cloud provider won't be able to provision the load
+	// balancer.
+	// This field may only be used if the IngressController has External scope.
+	// This value must be a valid IPv4 or IPv6 address.
+	// + ---
+	// + Note: this field is meant to be set by the ingress controller
+	// + to populate the `Service.Spec.LoadBalancerIP` field which has been
+	// + deprecated in Kubernetes:
+	// + https://github.com/kubernetes/kubernetes/pull/107235
+	// + However, the field is still used by cloud-provider-openstack to reconcile
+	// + the floating IP that we attach to the external load balancer.
+	//
+	// +kubebuilder:validation:XValidation:rule="isIP(self)",message="floatingIP must be a valid IPv4 or IPv6 address"
+	// +optional
+	FloatingIP string `json:"floatingIP,omitempty"`
+}
+
+// AWSClassicLoadBalancerParameters holds configuration parameters for an
+// AWS Classic load balancer.
+type AWSClassicLoadBalancerParameters struct {
+	// connectionIdleTimeout specifies the maximum time period that a
+	// connection may be idle before the load balancer closes the
+	// connection. The value must be parseable as a time duration value;
+	// see the documentation for Go's time.ParseDuration. A nil or zero value
+	// means no opinion, in which case a default value is used. The default
+	// value for this field is 60s. This default is subject to change.
+	//
+	// +kubebuilder:validation:Format=duration
+	// +optional
+	ConnectionIdleTimeout metav1.Duration `json:"connectionIdleTimeout,omitempty"`
+
+	// subnets specifies the subnets to which the load balancer will
+	// attach. The subnets may be specified by either their
+	// ID or name. The total number of subnets is limited to 10.
+	//
+	// In order for the load balancer to be provisioned with subnets,
+	// each subnet must exist, each subnet must be from a different
+	// availability zone, and the load balancer service must be
+	// recreated to pick up new values.
+	//
+	// When omitted from the spec, the subnets will be auto-discovered
+	// for each availability zone. Auto-discovered subnets are not reported
+	// in the status of the IngressController object.
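A short Go sketch of Classic load balancer parameters using the timeout and subnet fields above; the subnet ID is a made-up 24-character example and the timeout is illustrative:

```go
package ingressexample

import (
	"time"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// classicELB raises the idle timeout to two minutes and pins the load
// balancer to one explicit subnet ID ("subnet-" plus 17 hex characters).
var classicELB = operatorv1.AWSLoadBalancerParameters{
	Type: operatorv1.AWSClassicLoadBalancer,
	ClassicLoadBalancerParameters: &operatorv1.AWSClassicLoadBalancerParameters{
		ConnectionIdleTimeout: metav1.Duration{Duration: 2 * time.Minute},
		Subnets: &operatorv1.AWSSubnets{
			IDs: []operatorv1.AWSSubnetID{"subnet-0123456789abcdef0"},
		},
	},
}
```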
+ // + // +optional + // +openshift:enable:FeatureGate=IngressControllerLBSubnetsAWS + Subnets *AWSSubnets `json:"subnets,omitempty"` +} + +// AWSNetworkLoadBalancerParameters holds configuration parameters for an +// AWS Network load balancer. For Example: Setting AWS EIPs https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule=`has(self.subnets) && has(self.subnets.ids) && has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids + self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule=`has(self.subnets) && has(self.subnets.ids) && !has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule=`has(self.subnets) && has(self.subnets.names) && !has(self.subnets.ids) && has(self.eipAllocations) ? size(self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +type AWSNetworkLoadBalancerParameters struct { + // subnets specifies the subnets to which the load balancer will + // attach. The subnets may be specified by either their + // ID or name. The total number of subnets is limited to 10. + // + // In order for the load balancer to be provisioned with subnets, + // each subnet must exist, each subnet must be from a different + // availability zone, and the load balancer service must be + // recreated to pick up new values. + // + // When omitted from the spec, the subnets will be auto-discovered + // for each availability zone. Auto-discovered subnets are not reported + // in the status of the IngressController object. + // + // +optional + // +openshift:enable:FeatureGate=IngressControllerLBSubnetsAWS + Subnets *AWSSubnets `json:"subnets,omitempty"` + + // eipAllocations is a list of IDs for Elastic IP (EIP) addresses that + // are assigned to the Network Load Balancer. + // The following restrictions apply: + // + // eipAllocations can only be used with external scope, not internal. + // An EIP can be allocated to only a single IngressController. + // The number of EIP allocations must match the number of subnets that are used for the load balancer. + // Each EIP allocation must be unique. + // A maximum of 10 EIP allocations are permitted. + // + // See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html for general + // information about configuration, characteristics, and limitations of Elastic IP addresses. + // + // +openshift:enable:FeatureGate=SetEIPForNLBIngressController + // +optional + // +listType=atomic + // +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="eipAllocations cannot contain duplicates" + // +kubebuilder:validation:MaxItems=10 + EIPAllocations []EIPAllocation `json:"eipAllocations"` +} + +// EIPAllocation is an ID for an Elastic IP (EIP) address that can be allocated to an ELB in the AWS environment. +// Values must begin with `eipalloc-` followed by exactly 17 hexadecimal (`[0-9a-fA-F]`) characters. 
+// + Explanation of the regex `^eipalloc-[0-9a-fA-F]{17}$` for validating value of the EIPAllocation: +// + ^eipalloc- ensures the string starts with "eipalloc-". +// + [0-9a-fA-F]{17} matches exactly 17 hexadecimal characters (0-9, a-f, A-F). +// + $ ensures the string ends after the 17 hexadecimal characters. +// + Example of Valid and Invalid values: +// + eipalloc-1234567890abcdef1 is valid. +// + eipalloc-1234567890abcde is not valid (too short). +// + eipalloc-1234567890abcdefg is not valid (contains a non-hex character 'g'). +// + Max length is calculated as follows: +// + eipalloc- = 9 chars and 17 hexadecimal chars after `-` +// + So, total is 17 + 9 = 26 chars required for value of an EIPAllocation. +// +// +kubebuilder:validation:MinLength=26 +// +kubebuilder:validation:MaxLength=26 +// +kubebuilder:validation:XValidation:rule=`self.startsWith('eipalloc-')`,message="eipAllocations should start with 'eipalloc-'" +// +kubebuilder:validation:XValidation:rule=`self.split("-", 2)[1].matches('[0-9a-fA-F]{17}$')`,message="eipAllocations must be 'eipalloc-' followed by exactly 17 hexadecimal characters (0-9, a-f, A-F)" +type EIPAllocation string + +// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing +// strategy. +type HostNetworkStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` + + // httpPort is the port on the host which should be used to listen for + // HTTP requests. This field should be set when port 80 is already in use. + // The value should not coincide with the NodePort range of the cluster. + // When the value is 0 or is not specified it defaults to 80. + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=80 + // +optional + HTTPPort int32 `json:"httpPort,omitempty"` + + // httpsPort is the port on the host which should be used to listen for + // HTTPS requests. This field should be set when port 443 is already in use. + // The value should not coincide with the NodePort range of the cluster. + // When the value is 0 or is not specified it defaults to 443. + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=443 + // +optional + HTTPSPort int32 `json:"httpsPort,omitempty"` + + // statsPort is the port on the host where the stats from the router are + // published. 
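For orientation, a hedged Go sketch of a HostNetwork publishing strategy that moves the listeners off the default ports, for example when 80 and 443 are already taken on the nodes; port values are illustrative and must stay outside the cluster's NodePort range:

```go
package ingressexample

import operatorv1 "github.com/openshift/api/operator/v1"

// hostNet publishes the ingress controller on the host network with
// non-default HTTP/HTTPS ports and the default stats port.
var hostNet = operatorv1.EndpointPublishingStrategy{
	Type: operatorv1.HostNetworkStrategyType,
	HostNetwork: &operatorv1.HostNetworkStrategy{
		Protocol:  operatorv1.TCPProtocol,
		HTTPPort:  8080,
		HTTPSPort: 8443,
		StatsPort: 1936,
	},
}
```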
The value should not coincide with the NodePort range of the
+	// cluster. If an external load balancer is configured to forward connections
+	// to this IngressController, the load balancer should use this port for
+	// health checks. The load balancer can send HTTP probes on this port on a
+	// given node, with the path /healthz/ready to determine if the ingress
+	// controller is ready to receive traffic on the node. For proper operation
+	// the load balancer must not forward traffic to a node until the health
+	// check reports ready. The load balancer should also stop forwarding requests
+	// within a maximum of 45 seconds after /healthz/ready starts reporting
+	// not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with
+	// a threshold of two successful or failed requests to become healthy or
+	// unhealthy respectively, is a well-tested configuration. When the value is 0 or
+	// is not specified it defaults to 1936.
+	// +kubebuilder:validation:Maximum=65535
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:default=1936
+	// +optional
+	StatsPort int32 `json:"statsPort,omitempty"`
+}
+
+// PrivateStrategy holds parameters for the Private endpoint publishing
+// strategy.
+type PrivateStrategy struct {
+	// protocol specifies whether the IngressController expects incoming
+	// connections to use plain TCP or whether the IngressController expects
+	// PROXY protocol.
+	//
+	// PROXY protocol can be used with load balancers that support it to
+	// communicate the source addresses of client connections when
+	// forwarding those connections to the IngressController. Using PROXY
+	// protocol enables the IngressController to report those source
+	// addresses instead of reporting the load balancer's address in HTTP
+	// headers and logs. Note that enabling PROXY protocol on the
+	// IngressController will cause connections to fail if you are not using
+	// a load balancer that uses PROXY protocol to forward connections to
+	// the IngressController. See
+	// http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+	// information about PROXY protocol.
+	//
+	// The following values are valid for this field:
+	//
+	// * The empty string.
+	// * "TCP".
+	// * "PROXY".
+	//
+	// The empty string specifies the default, which is TCP without PROXY
+	// protocol. Note that the default is subject to change.
+	//
+	// +optional
+	Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+}
+
+// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.
+type NodePortStrategy struct {
+	// protocol specifies whether the IngressController expects incoming
+	// connections to use plain TCP or whether the IngressController expects
+	// PROXY protocol.
+	//
+	// PROXY protocol can be used with load balancers that support it to
+	// communicate the source addresses of client connections when
+	// forwarding those connections to the IngressController. Using PROXY
+	// protocol enables the IngressController to report those source
+	// addresses instead of reporting the load balancer's address in HTTP
+	// headers and logs. Note that enabling PROXY protocol on the
+	// IngressController will cause connections to fail if you are not using
+	// a load balancer that uses PROXY protocol to forward connections to
+	// the IngressController. See
+	// http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+	// information about PROXY protocol.
+	//
+	// The following values are valid for this field:
+	//
+	// * The empty string.
+	// * "TCP".
+ // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// IngressControllerProtocol specifies whether PROXY protocol is enabled or not. +// +kubebuilder:validation:Enum="";TCP;PROXY +type IngressControllerProtocol string + +const ( + DefaultProtocol IngressControllerProtocol = "" + TCPProtocol IngressControllerProtocol = "TCP" + ProxyProtocol IngressControllerProtocol = "PROXY" +) + +// EndpointPublishingStrategy is a way to publish the endpoints of an +// IngressController, and represents the type and any additional configuration +// for a specific type. +// +union +type EndpointPublishingStrategy struct { + // type is the publishing strategy to use. Valid values are: + // + // * LoadBalancerService + // + // Publishes the ingress controller using a Kubernetes LoadBalancer Service. + // + // In this configuration, the ingress controller deployment uses container + // networking. A LoadBalancer Service is created to publish the deployment. + // + // See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + // + // If domain is set, a wildcard DNS record will be managed to point at the + // LoadBalancer Service's external name. DNS records are managed only in DNS + // zones defined by dns.config.openshift.io/cluster .spec.publicZone and + // .spec.privateZone. + // + // Wildcard DNS management is currently supported only on the AWS, Azure, + // and GCP platforms. + // + // * HostNetwork + // + // Publishes the ingress controller on node ports where the ingress controller + // is deployed. + // + // In this configuration, the ingress controller deployment uses host + // networking, bound to node ports 80 and 443. The user is responsible for + // configuring an external load balancer to publish the ingress controller via + // the node ports. + // + // * Private + // + // Does not publish the ingress controller. + // + // In this configuration, the ingress controller deployment uses container + // networking, and is not explicitly published. The user must manually publish + // the ingress controller. + // + // * NodePortService + // + // Publishes the ingress controller using a Kubernetes NodePort Service. + // + // In this configuration, the ingress controller deployment uses container + // networking. A NodePort Service is created to publish the deployment. The + // specific node ports are dynamically allocated by OpenShift; however, to + // support static port allocations, user changes to the node port + // field of the managed NodePort Service will preserved. + // + // +unionDiscriminator + // +required + Type EndpointPublishingStrategyType `json:"type"` + + // loadBalancer holds parameters for the load balancer. Present only if + // type is LoadBalancerService. + // +optional + LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"` + + // hostNetwork holds parameters for the HostNetwork endpoint publishing + // strategy. Present only if type is HostNetwork. + // +optional + HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"` + + // private holds parameters for the Private endpoint publishing + // strategy. Present only if type is Private. + // +optional + Private *PrivateStrategy `json:"private,omitempty"` + + // nodePort holds parameters for the NodePortService endpoint publishing strategy. + // Present only if type is NodePortService. 
+ // +optional + NodePort *NodePortStrategy `json:"nodePort,omitempty"` +} + +// ClientCertificatePolicy describes the policy for client certificates. +// +kubebuilder:validation:Enum="";Required;Optional +type ClientCertificatePolicy string + +const ( + // ClientCertificatePolicyRequired indicates that a client certificate + // should be required. + ClientCertificatePolicyRequired ClientCertificatePolicy = "Required" + + // ClientCertificatePolicyOptional indicates that a client certificate + // should be requested but not required. + ClientCertificatePolicyOptional ClientCertificatePolicy = "Optional" +) + +// ClientTLS specifies TLS configuration to enable client-to-server +// authentication, which can be used for mutual TLS. +type ClientTLS struct { + // clientCertificatePolicy specifies whether the ingress controller + // requires clients to provide certificates. This field accepts the + // values "Required" or "Optional". + // + // Note that the ingress controller only checks client certificates for + // edge-terminated and reencrypt TLS routes; it cannot check + // certificates for cleartext HTTP or passthrough TLS routes. + // + // +required + ClientCertificatePolicy ClientCertificatePolicy `json:"clientCertificatePolicy"` + + // clientCA specifies a configmap containing the PEM-encoded CA + // certificate bundle that should be used to verify a client's + // certificate. The administrator must create this configmap in the + // openshift-config namespace. + // + // +required + ClientCA configv1.ConfigMapNameReference `json:"clientCA"` + + // allowedSubjectPatterns specifies a list of regular expressions that + // should be matched against the distinguished name on a valid client + // certificate to filter requests. The regular expressions must use + // PCRE syntax. If this list is empty, no filtering is performed. If + // the list is nonempty, then at least one pattern must match a client + // certificate's distinguished name or else the ingress controller + // rejects the certificate and denies the connection. + // + // +listType=atomic + // +optional + AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"` +} + +// RouteAdmissionPolicy is an admission policy for allowing new route claims. +type RouteAdmissionPolicy struct { + // namespaceOwnership describes how host name claims across namespaces should + // be handled. + // + // Value must be one of: + // + // - Strict: Do not allow routes in different namespaces to claim the same host. + // + // - InterNamespaceAllowed: Allow routes to claim different paths of the same + // host name across namespaces. + // + // If empty, the default is Strict. + // +optional + NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` + // wildcardPolicy describes how routes with wildcard policies should + // be handled for the ingress controller. WildcardPolicy controls use + // of routes [1] exposed by the ingress controller based on the route's + // wildcard policy. + // + // [1] https://github.com/openshift/api/blob/master/route/v1/types.go + // + // Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed + // will cause admitted routes with a wildcard policy of Subdomain to stop + // working. These routes must be updated to a wildcard policy of None to be + // readmitted by the ingress controller. + // + // WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. + // + // If empty, defaults to "WildcardsDisallowed". 
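A brief Go sketch of the ClientTLS block defined above, enforcing mutual TLS for edge-terminated and reencrypt routes; the configmap name and the PCRE subject pattern are illustrative:

```go
package ingressexample

import (
	configv1 "github.com/openshift/api/config/v1"
	operatorv1 "github.com/openshift/api/operator/v1"
)

// mutualTLS requires client certificates signed by the CA bundle in the
// (illustrative) "client-ca" configmap in openshift-config, and accepts
// only certificates whose subject CN ends in .example.com.
var mutualTLS = operatorv1.ClientTLS{
	ClientCertificatePolicy: operatorv1.ClientCertificatePolicyRequired,
	ClientCA:                configv1.ConfigMapNameReference{Name: "client-ca"},
	AllowedSubjectPatterns:  []string{`^/CN=.*\.example\.com$`},
}
```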
+ // + WildcardPolicy WildcardPolicy `json:"wildcardPolicy,omitempty"` +} + +// WildcardPolicy is a route admission policy component that describes how +// routes with a wildcard policy should be handled. +// +kubebuilder:validation:Enum=WildcardsAllowed;WildcardsDisallowed +type WildcardPolicy string + +const ( + // WildcardPolicyAllowed indicates routes with any wildcard policy are + // admitted by the ingress controller. + WildcardPolicyAllowed WildcardPolicy = "WildcardsAllowed" + + // WildcardPolicyDisallowed indicates only routes with a wildcard policy + // of None are admitted by the ingress controller. + WildcardPolicyDisallowed WildcardPolicy = "WildcardsDisallowed" +) + +// NamespaceOwnershipCheck is a route admission policy component that describes +// how host name claims across namespaces should be handled. +// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict +type NamespaceOwnershipCheck string + +const ( + // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces. + InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed" + + // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces. + StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict" +) + +// LoggingDestinationType is a type of destination to which to send log +// messages. +// +// +kubebuilder:validation:Enum=Container;Syslog +type LoggingDestinationType string + +const ( + // Container sends log messages to a sidecar container. + ContainerLoggingDestinationType LoggingDestinationType = "Container" + + // Syslog sends log messages to a syslog endpoint. + SyslogLoggingDestinationType LoggingDestinationType = "Syslog" + + // ContainerLoggingSidecarContainerName is the name of the container + // with the log output in an ingress controller pod when container + // logging is used. + ContainerLoggingSidecarContainerName = "logs" +) + +// SyslogLoggingDestinationParameters describes parameters for the Syslog +// logging destination type. +type SyslogLoggingDestinationParameters struct { + // address is the IP address of the syslog endpoint that receives log + // messages. + // + // +required + Address string `json:"address"` + + // port is the UDP port number of the syslog endpoint that receives log + // messages. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +required + Port uint32 `json:"port"` + + // facility specifies the syslog facility of log messages. + // + // If this field is empty, the facility is "local1". + // + // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 + // +optional + Facility string `json:"facility,omitempty"` + + // maxLength is the maximum length of the log message. + // + // Valid values are integers in the range 480 to 4096, inclusive. + // + // When omitted, the default value is 1024. + // + // +kubebuilder:validation:Maximum=4096 + // +kubebuilder:validation:Minimum=480 + // +kubebuilder:default=1024 + // +default:=1024 + // +optional + MaxLength uint32 `json:"maxLength,omitempty"` +} + +// ContainerLoggingDestinationParameters describes parameters for the Container +// logging destination type. +type ContainerLoggingDestinationParameters struct { + // maxLength is the maximum length of the log message. + // + // Valid values are integers in the range 480 to 8192, inclusive. 
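To show how these syslog parameters compose, a hedged Go sketch of an access-log destination using the LoggingDestination union defined just below; the address and settings are illustrative:

```go
package ingressexample

import operatorv1 "github.com/openshift/api/operator/v1"

// syslogAccessLogs ships access logs to an example syslog endpoint on the
// default syslog port, with the default facility and message length.
var syslogAccessLogs = operatorv1.LoggingDestination{
	Type: operatorv1.SyslogLoggingDestinationType,
	Syslog: &operatorv1.SyslogLoggingDestinationParameters{
		Address:   "192.0.2.10", // documentation address range
		Port:      514,
		Facility:  "local1",
		MaxLength: 1024,
	},
}
```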
+ // + // When omitted, the default value is 1024. + // + // +kubebuilder:validation:Maximum=8192 + // +kubebuilder:validation:Minimum=480 + // +kubebuilder:default=1024 + // +default:=1024 + // +optional + MaxLength int32 `json:"maxLength,omitempty"` +} + +// LoggingDestination describes a destination for log messages. +// +union +type LoggingDestination struct { + // type is the type of destination for logs. It must be one of the + // following: + // + // * Container + // + // The ingress operator configures the sidecar container named "logs" on + // the ingress controller pod and configures the ingress controller to + // write logs to the sidecar. The logs are then available as container + // logs. The expectation is that the administrator configures a custom + // logging solution that reads logs from this sidecar. Note that using + // container logs means that logs may be dropped if the rate of logs + // exceeds the container runtime's or the custom logging solution's + // capacity. + // + // * Syslog + // + // Logs are sent to a syslog endpoint. The administrator must specify + // an endpoint that can receive syslog messages. The expectation is + // that the administrator has configured a custom syslog instance. + // + // +unionDiscriminator + // +required + Type LoggingDestinationType `json:"type"` + + // syslog holds parameters for a syslog endpoint. Present only if + // type is Syslog. + // + // +optional + Syslog *SyslogLoggingDestinationParameters `json:"syslog,omitempty"` + + // container holds parameters for the Container logging destination. + // Present only if type is Container. + // + // +optional + Container *ContainerLoggingDestinationParameters `json:"container,omitempty"` +} + +// IngressControllerCaptureHTTPHeader describes an HTTP header that should be +// captured. +type IngressControllerCaptureHTTPHeader struct { + // name specifies a header name. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +required + Name string `json:"name"` + + // maxLength specifies a maximum length for the header value. If a + // header value exceeds this length, the value will be truncated in the + // log message. Note that the ingress controller may impose a separate + // bound on the total length of HTTP headers in a request. + // + // +kubebuilder:validation:Minimum=1 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPHeaders specifies which HTTP headers the +// IngressController captures. +type IngressControllerCaptureHTTPHeaders struct { + // request specifies which HTTP request headers to capture. + // + // If this field is empty, no request headers are captured. + // + // +nullable + // +optional + // +listType=atomic + Request []IngressControllerCaptureHTTPHeader `json:"request,omitempty"` + + // response specifies which HTTP response headers to capture. + // + // If this field is empty, no response headers are captured. + // + // +nullable + // +optional + // +listType=atomic + Response []IngressControllerCaptureHTTPHeader `json:"response,omitempty"` +} + +// CookieMatchType indicates the type of matching used against cookie names to +// select a cookie for capture. +// +kubebuilder:validation:Enum=Exact;Prefix +type CookieMatchType string + +const ( + // CookieMatchTypeExact indicates that an exact string match should be + // performed. 
+ CookieMatchTypeExact CookieMatchType = "Exact" + // CookieMatchTypePrefix indicates that a string prefix match should be + // performed. + CookieMatchTypePrefix CookieMatchType = "Prefix" +) + +// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be +// captured. +type IngressControllerCaptureHTTPCookie struct { + IngressControllerCaptureHTTPCookieUnion `json:",inline"` + + // maxLength specifies a maximum length of the string that will be + // logged, which includes the cookie name, cookie value, and + // one-character delimiter. If the log entry exceeds this length, the + // value will be truncated in the log message. Note that the ingress + // controller may impose a separate bound on the total length of HTTP + // headers in a request. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=1024 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured. +// +union +type IngressControllerCaptureHTTPCookieUnion struct { + // matchType specifies the type of match to be performed on the cookie + // name. Allowed values are "Exact" for an exact string match and + // "Prefix" for a string prefix match. If "Exact" is specified, a name + // must be specified in the name field. If "Prefix" is provided, a + // prefix must be specified in the namePrefix field. For example, + // specifying matchType "Prefix" and namePrefix "foo" will capture a + // cookie named "foo" or "foobar" but not one named "bar". The first + // matching cookie is captured. + // + // +unionDiscriminator + // +required + MatchType CookieMatchType `json:"matchType,omitempty"` + + // name specifies a cookie name. Its value must be a valid HTTP cookie + // name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + Name string `json:"name"` + + // namePrefix specifies a cookie name prefix. Its value must be a valid + // HTTP cookie name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + NamePrefix string `json:"namePrefix"` +} + +// LoggingPolicy indicates how an event should be logged. +// +kubebuilder:validation:Enum=Log;Ignore +type LoggingPolicy string + +const ( + // LoggingPolicyLog indicates that an event should be logged. + LoggingPolicyLog LoggingPolicy = "Log" + // LoggingPolicyIgnore indicates that an event should not be logged. + LoggingPolicyIgnore LoggingPolicy = "Ignore" +) + +// AccessLogging describes how client requests should be logged. +type AccessLogging struct { + // destination is where access logs go. + // + // +required + Destination LoggingDestination `json:"destination"` + + // httpLogFormat specifies the format of the log message for an HTTP + // request. + // + // If this field is empty, log messages use the implementation's default + // HTTP log format. For HAProxy's default HTTP log format, see the + // HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // Note that this format only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). 
It does not affect the log format for TLS passthrough + // connections. + // + // +optional + HttpLogFormat string `json:"httpLogFormat,omitempty"` + + // httpCaptureHeaders defines HTTP headers that should be captured in + // access logs. If this field is empty, no headers are captured. + // + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be captured for TLS passthrough + // connections. + // + // +optional + HTTPCaptureHeaders IngressControllerCaptureHTTPHeaders `json:"httpCaptureHeaders,omitempty"` + + // httpCaptureCookies specifies HTTP cookies that should be captured in + // access logs. If this field is empty, no cookies are captured. + // + // +nullable + // +optional + // +kubebuilder:validation:MaxItems=1 + // +listType=atomic + HTTPCaptureCookies []IngressControllerCaptureHTTPCookie `json:"httpCaptureCookies,omitempty"` + + // logEmptyRequests specifies how connections on which no request is + // received should be logged. Typically, these empty requests come from + // load balancers' health probes or Web browsers' speculative + // connections ("preconnect"), in which case logging these requests may + // be undesirable. However, these requests may also be caused by + // network errors, in which case logging empty requests may be useful + // for diagnosing the errors. In addition, these requests may be caused + // by port scans, in which case logging empty requests may aid in + // detecting intrusion attempts. Allowed values for this field are + // "Log" and "Ignore". The default value is "Log". + // + // +optional + // +kubebuilder:default:="Log" + LogEmptyRequests LoggingPolicy `json:"logEmptyRequests,omitempty"` +} + +// IngressControllerLogging describes what should be logged where. +type IngressControllerLogging struct { + // access describes how the client requests should be logged. + // + // If this field is empty, access logging is disabled. + // + // +optional + Access *AccessLogging `json:"access,omitempty"` +} + +// IngressControllerHTTPHeaderPolicy is a policy for setting HTTP headers. +// +// +kubebuilder:validation:Enum=Append;Replace;IfNone;Never +type IngressControllerHTTPHeaderPolicy string + +const ( + // AppendHTTPHeaderPolicy appends the header, preserving any existing header. + AppendHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Append" + // ReplaceHTTPHeaderPolicy sets the header, removing any existing header. + ReplaceHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Replace" + // IfNoneHTTPHeaderPolicy sets the header if it is not already set. + IfNoneHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "IfNone" + // NeverHTTPHeaderPolicy never sets the header, preserving any existing + // header. + NeverHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Never" +) + +// IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a +// unique id header. +type IngressControllerHTTPUniqueIdHeaderPolicy struct { + // name specifies the name of the HTTP header (for example, "unique-id") + // that the ingress controller should inject into HTTP requests. The + // field's value must be a valid HTTP header name as defined in RFC 2616 + // section 4.2. If the field is empty, no header is injected. 
+ // + // +optional + // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Name string `json:"name,omitempty"` + + // format specifies the format for the injected HTTP header's value. + // This field has no effect unless name is specified. For the + // HAProxy-based ingress controller implementation, this format uses the + // same syntax as the HTTP log format. If the field is empty, the + // default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the + // corresponding HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // +optional + // +kubebuilder:validation:Pattern="^(%(%|(\\{[-+]?[QXE](,[-+]?[QXE])*\\})?([A-Za-z]+|\\[[.0-9A-Z_a-z]+(\\([^)]+\\))?(,[.0-9A-Z_a-z]+(\\([^)]+\\))?)*\\]))|[^%[:cntrl:]])*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Format string `json:"format,omitempty"` +} + +// IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header +// (for example, "X-Forwarded-For") in the desired capitalization. The value +// must be a valid HTTP header name as defined in RFC 2616 section 4.2. +// +// +optional +// +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=1024 +type IngressControllerHTTPHeaderNameCaseAdjustment string + +// IngressControllerHTTPHeaders specifies how the IngressController handles +// certain HTTP headers. +type IngressControllerHTTPHeaders struct { + // forwardedHeaderPolicy specifies when and how the IngressController + // sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, + // X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version + // HTTP headers. The value may be one of the following: + // + // * "Append", which specifies that the IngressController appends the + // headers, preserving existing headers. + // + // * "Replace", which specifies that the IngressController sets the + // headers, replacing any existing Forwarded or X-Forwarded-* headers. + // + // * "IfNone", which specifies that the IngressController sets the + // headers if they are not already set. + // + // * "Never", which specifies that the IngressController never sets the + // headers, preserving any existing headers. + // + // By default, the policy is "Append". + // + // +optional + ForwardedHeaderPolicy IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` + + // uniqueId describes configuration for a custom HTTP header that the + // ingress controller should inject into incoming HTTP requests. + // Typically, this header is configured to have a value that is unique + // to the HTTP request. The header can be used by applications or + // included in access logs to facilitate tracing individual HTTP + // requests. + // + // If this field is empty, no such header is injected into requests. + // + // +optional + UniqueId IngressControllerHTTPUniqueIdHeaderPolicy `json:"uniqueId,omitempty"` + + // headerNameCaseAdjustments specifies case adjustments that can be + // applied to HTTP header names. Each adjustment is specified as an + // HTTP header name with the desired capitalization. For example, + // specifying "X-Forwarded-For" indicates that the "x-forwarded-for" + // HTTP header should be adjusted to have the specified capitalization. 
+ // + // These adjustments are only applied to cleartext, edge-terminated, and + // re-encrypt routes, and only when using HTTP/1. + // + // For request headers, these adjustments are applied only for routes + // that have the haproxy.router.openshift.io/h1-adjust-case=true + // annotation. For response headers, these adjustments are applied to + // all HTTP responses. + // + // If this field is empty, no request headers are adjusted. + // + // +nullable + // +optional + // +listType=atomic + HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` + + // actions specifies options for modifying headers and their values. + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be modified for TLS passthrough + // connections. + // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` + // may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in + // accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. + // Any actions defined here are applied after any actions related to the following other fields: + // cache-control, spec.clientTLS, + // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, + // and spec.httpHeaders.headerNameCaseAdjustments. + // In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after + // the actions specified in the IngressController's spec.httpHeaders.actions field. + // In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be + // executed after the actions specified in the Route's spec.httpHeaders.actions field. + // Headers set using this API cannot be captured for use in access logs. + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. Please refer to the documentation + // for that API field for more details. + // +optional + Actions IngressControllerHTTPHeaderActions `json:"actions,omitempty"` +} + +// IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers. +type IngressControllerHTTPHeaderActions struct { + // response is a list of HTTP response headers to modify. + // Actions defined here will modify the response headers of all requests passing through an ingress controller. + // These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. + // IngressController actions for response headers will be executed after Route actions. + // Currently, actions may define to either `Set` or `Delete` headers values. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 response header actions may be configured. + // Sample fetchers allowed are "res.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". 
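+	//
+	// As a sketch only (the header name and value here are illustrative,
+	// not recommendations), one Set action could be expressed in Go as:
+	//
+	//	response := []IngressControllerHTTPHeader{{
+	//		Name: "X-Frame-Options",
+	//		Action: IngressControllerHTTPHeaderActionUnion{
+	//			Type: Set,
+	//			Set:  &IngressControllerSetHTTPHeader{Value: "DENY"},
+	//		},
+	//	}}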
+ // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + Response []IngressControllerHTTPHeader `json:"response"` + // request is a list of HTTP request headers to modify. + // Actions defined here will modify the request headers of all requests passing through an ingress controller. + // These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. + // IngressController actions for request headers will be executed before Route actions. + // Currently, actions may define to either `Set` or `Delete` headers values. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 request header actions may be configured. + // Sample fetchers allowed are "req.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + Request []IngressControllerHTTPHeader `json:"request"` +} + +// IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. +type IngressControllerHTTPHeader struct { + // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. + // It must be no more than 255 characters in length. 
+ // Header name must be unique. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'host'",message="host header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" + Name string `json:"name"` + // action specifies actions to perform on headers, such as setting or deleting headers. + // +required + Action IngressControllerHTTPHeaderActionUnion `json:"action"` +} + +// IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise" +// +union +type IngressControllerHTTPHeaderActionUnion struct { + // type defines the type of the action to be applied on the header. + // Possible values are Set or Delete. + // Set allows you to set HTTP request and response headers. + // Delete allows you to delete HTTP request and response headers. + // +unionDiscriminator + // +kubebuilder:validation:Enum:=Set;Delete + // +required + Type IngressControllerHTTPHeaderActionType `json:"type"` + + // set specifies how the HTTP header should be set. + // This field is required when type is Set and forbidden otherwise. + // +optional + // +unionMember + Set *IngressControllerSetHTTPHeader `json:"set,omitempty"` +} + +// IngressControllerHTTPHeaderActionType defines actions that can be performed on HTTP headers. +type IngressControllerHTTPHeaderActionType string + +const ( + // Set specifies that an HTTP header should be set. + Set IngressControllerHTTPHeaderActionType = "Set" + // Delete specifies that an HTTP header should be deleted. + Delete IngressControllerHTTPHeaderActionType = "Delete" +) + +// IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header. +type IngressControllerSetHTTPHeader struct { + // value specifies a header value. + // Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in + // http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and + // otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. + // The value of this field must be no more than 16384 characters in length. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. + // + --- + // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. + // + See . 
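+	//
+	// A minimal sketch of a dynamic value that uses only the allowed
+	// fetchers and converters documented above (illustrative, not
+	// prescriptive):
+	//
+	//	set := IngressControllerSetHTTPHeader{
+	//		Value: "%[req.hdr(host),lower]",
+	//	}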
+ // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=16384 + Value string `json:"value"` +} + +// IngressControllerTuningOptions specifies options for tuning the performance +// of ingress controller pods +type IngressControllerTuningOptions struct { + // headerBufferBytes describes how much memory should be reserved + // (in bytes) for IngressController connection sessions. + // Note that this value must be at least 16384 if HTTP/2 is + // enabled for the IngressController (https://tools.ietf.org/html/rfc7540). + // If this field is empty, the IngressController will use a default value + // of 32768 bytes. + // + // Setting this field is generally not recommended as headerBufferBytes + // values that are too small may break the IngressController and + // headerBufferBytes values that are too large could cause the + // IngressController to use significantly more memory than necessary. + // + // +kubebuilder:validation:Minimum=16384 + // +optional + HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"` + + // headerBufferMaxRewriteBytes describes how much memory should be reserved + // (in bytes) from headerBufferBytes for HTTP header rewriting + // and appending for IngressController connection sessions. + // Note that incoming HTTP requests will be limited to + // (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning + // headerBufferBytes must be greater than headerBufferMaxRewriteBytes. + // If this field is empty, the IngressController will use a default value + // of 8192 bytes. + // + // Setting this field is generally not recommended as + // headerBufferMaxRewriteBytes values that are too small may break the + // IngressController and headerBufferMaxRewriteBytes values that are too + // large could cause the IngressController to use significantly more memory + // than necessary. + // + // +kubebuilder:validation:Minimum=4096 + // +optional + HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` + + // threadCount defines the number of threads created per HAProxy process. + // Creating more threads allows each ingress controller pod to handle more + // connections, at the cost of more system resources being used. HAProxy + // currently supports up to 64 threads. If this field is empty, the + // IngressController will use the default value. The current default is 4 + // threads, but this may change in future releases. + // + // Setting this field is generally not recommended. Increasing the number + // of HAProxy threads allows ingress controller pods to utilize more CPU + // time under load, potentially starving other pods if set too high. + // Reducing the number of threads may cause the ingress controller to + // perform poorly. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=64 + // +optional + ThreadCount int32 `json:"threadCount,omitempty"` + + // clientTimeout defines how long a connection will be held open while + // waiting for a client response. + // + // If unset, the default timeout is 30s + // +kubebuilder:validation:Format=duration + // +optional + ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` + + // clientFinTimeout defines how long a connection will be held open while + // waiting for the client response to the server/backend closing the + // connection. 
+ // + // If unset, the default timeout is 1s + // +kubebuilder:validation:Format=duration + // +optional + ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` + + // serverTimeout defines how long a connection will be held open while + // waiting for a server/backend response. + // + // If unset, the default timeout is 30s + // +kubebuilder:validation:Format=duration + // +optional + ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` + + // serverFinTimeout defines how long a connection will be held open while + // waiting for the server/backend response to the client closing the + // connection. + // + // If unset, the default timeout is 1s + // +kubebuilder:validation:Format=duration + // +optional + ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` + + // tunnelTimeout defines how long a tunnel connection (including + // websockets) will be held open while the tunnel is idle. + // + // If unset, the default timeout is 1h + // +kubebuilder:validation:Format=duration + // +optional + TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` + + // connectTimeout defines the maximum time to wait for + // a connection attempt to a server/backend to succeed. + // + // This field expects an unsigned duration string of decimal numbers, each with optional + // fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". + // + // When omitted, this means the user has no opinion and the platform is left + // to choose a reasonable default. This default is subject to change over time. + // The current default is 5s. + // + // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + // +kubebuilder:validation:Type:=string + // +optional + ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"` + + // tlsInspectDelay defines how long the router can hold data to find a + // matching route. + // + // Setting this too short can cause the router to fall back to the default + // certificate for edge-terminated or reencrypt routes even when a better + // matching certificate could be used. + // + // If unset, the default inspect delay is 5s + // +kubebuilder:validation:Format=duration + // +optional + TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` + + // healthCheckInterval defines how long the router waits between two consecutive + // health checks on its configured backends. This value is applied globally as + // a default for all routes, but may be overridden per-route by the route annotation + // "router.openshift.io/haproxy.health.check.interval". + // + // Expects an unsigned duration string of decimal numbers, each with optional + // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". + // + // Setting this to less than 5s can cause excess traffic due to too frequent + // TCP health checks and accompanying SYN packet storms. Alternatively, setting + // this too high can result in increased latency, due to backend servers that are no + // longer available, but haven't yet been detected as such. + // + // An empty or zero healthCheckInterval means no opinion and IngressController chooses + // a default, which is subject to change over time. + // Currently the default healthCheckInterval value is 5s. 
+ // + // Currently the minimum allowed value is 1s and the maximum allowed value is + // 2147483647ms (24.85 days). Both are subject to change over time. + // + // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + // +kubebuilder:validation:Type:=string + // +optional + HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"` + + // maxConnections defines the maximum number of simultaneous + // connections that can be established per HAProxy process. + // Increasing this value allows each ingress controller pod to + // handle more connections but at the cost of additional + // system resources being consumed. + // + // Permitted values are: empty, 0, -1, and the range + // 2000-2000000. + // + // If this field is empty or 0, the IngressController will use + // the default value of 50000, but the default is subject to + // change in future releases. + // + // If the value is -1 then HAProxy will dynamically compute a + // maximum value based on the available ulimits in the running + // container. Selecting -1 (i.e., auto) will result in a large + // value being computed (~520000 on OpenShift >=4.10 clusters) + // and therefore each HAProxy process will incur significant + // memory usage compared to the current default of 50000. + // + // Setting a value that is greater than the current operating + // system limit will prevent the HAProxy process from + // starting. + // + // If you choose a discrete value (e.g., 750000) and the + // router pod is migrated to a new node, there's no guarantee + // that that new node has identical ulimits configured. In + // such a scenario the pod would fail to start. If you have + // nodes with different ulimits configured (e.g., different + // tuned profiles) and you choose a discrete value then the + // guidance is to use -1 and let the value be computed + // dynamically at runtime. + // + // You can monitor memory usage for router containers with the + // following metric: + // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}'. + // + // You can monitor memory usage of individual HAProxy + // processes in router containers with the following metric: + // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'. + // + // +optional + MaxConnections int32 `json:"maxConnections,omitempty"` + + // reloadInterval defines the minimum interval at which the router is allowed to reload + // to accept new changes. Increasing this value can prevent the accumulation of + // HAProxy processes, depending on the scenario. Increasing this interval can + // also lessen load imbalance on a backend's servers when using the roundrobin + // balancing algorithm. Alternatively, decreasing this value may decrease latency + // since updates to HAProxy's configuration can take effect more quickly. + // + // The value must be a time duration value; see . + // Currently, the minimum value allowed is 1s, and the maximum allowed value is + // 120s. Minimum and maximum allowed values may change in future versions of OpenShift. + // Note that if a duration outside of these bounds is provided, the value of reloadInterval + // will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to + // 120s; the IngressController will not reject and replace this disallowed value with + // the default). 
+ // + // A zero value for reloadInterval tells the IngressController to choose the default, + // which is currently 5s and subject to change without notice. + // + // This field expects an unsigned duration string of decimal numbers, each with optional + // fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". + // + // Note: Setting a value significantly larger than the default of 5s can cause latency + // in observing updates to routes and their endpoints. HAProxy's configuration will + // be reloaded less frequently, and newly created routes will not be served until the + // subsequent reload. + // + // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + // +kubebuilder:validation:Type:=string + // +optional + ReloadInterval metav1.Duration `json:"reloadInterval,omitempty"` +} + +// HTTPEmptyRequestsPolicy indicates how HTTP connections for which no request +// is received should be handled. +// +kubebuilder:validation:Enum=Respond;Ignore +type HTTPEmptyRequestsPolicy string + +const ( + // HTTPEmptyRequestsPolicyRespond indicates that the ingress controller + // should respond to empty requests. + HTTPEmptyRequestsPolicyRespond HTTPEmptyRequestsPolicy = "Respond" + // HTTPEmptyRequestsPolicyIgnore indicates that the ingress controller + // should ignore empty requests. + HTTPEmptyRequestsPolicyIgnore HTTPEmptyRequestsPolicy = "Ignore" +) + +var ( + // Available indicates the ingress controller deployment is available. + IngressControllerAvailableConditionType = "Available" + // LoadBalancerManaged indicates the management status of any load balancer + // service associated with an ingress controller. + LoadBalancerManagedIngressConditionType = "LoadBalancerManaged" + // LoadBalancerReady indicates the ready state of any load balancer service + // associated with an ingress controller. + LoadBalancerReadyIngressConditionType = "LoadBalancerReady" + // DNSManaged indicates the management status of any DNS records for the + // ingress controller. + DNSManagedIngressConditionType = "DNSManaged" + // DNSReady indicates the ready state of any DNS records for the ingress + // controller. + DNSReadyIngressConditionType = "DNSReady" +) + +// IngressControllerStatus defines the observed status of the IngressController. +type IngressControllerStatus struct { + // availableReplicas is number of observed available replicas according to the + // ingress controller deployment. + AvailableReplicas int32 `json:"availableReplicas"` + + // selector is a label selector, in string format, for ingress controller pods + // corresponding to the IngressController. The number of matching pods should + // equal the value of availableReplicas. + Selector string `json:"selector"` + + // domain is the actual domain in use. + Domain string `json:"domain"` + + // endpointPublishingStrategy is the actual strategy in use. + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // conditions is a list of conditions and their status. + // + // Available means the ingress controller deployment is available and + // servicing route and ingress resources (i.e, .status.availableReplicas + // equals .spec.replicas) + // + // There are additional conditions which indicate the status of other + // ingress controller features and capabilities. 
+ // + // * LoadBalancerManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy requires a service load balancer. + // - False if any of those conditions are unsatisfied. + // + // * LoadBalancerReady + // - True if the following conditions are met: + // * A load balancer is managed. + // * The load balancer is ready. + // - False if any of those conditions are unsatisfied. + // + // * DNSManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy and platform support DNS. + // * The ingress controller domain is set. + // * dns.config.openshift.io/cluster configures DNS zones. + // - False if any of those conditions are unsatisfied. + // + // * DNSReady + // - True if the following conditions are met: + // * DNS is managed. + // * DNS records have been successfully created. + // - False if any of those conditions are unsatisfied. + // +listType=map + // +listMapKey=type + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // tlsProfile is the TLS connection configuration that is in effect. + // +optional + TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"` + + // observedGeneration is the most recent generation observed. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // namespaceSelector is the actual namespaceSelector in use. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is the actual routeSelector in use. + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressControllerList contains a list of IngressControllers. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type IngressControllerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []IngressController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_insights.go b/vendor/github.com/openshift/api/operator/v1/types_insights.go new file mode 100644 index 0000000000000..ed59bb438b783 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_insights.go @@ -0,0 +1,156 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsoperators,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1237 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=insights,operatorOrdering=00 +// +// InsightsOperator holds cluster-wide information about the Insights Operator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type InsightsOperator struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the Insights Operator.
+	// +required
+	Spec InsightsOperatorSpec `json:"spec"`
+
+	// status is the most recently observed status of the Insights operator.
+	// +optional
+	Status InsightsOperatorStatus `json:"status"`
+}
+
+type InsightsOperatorSpec struct {
+	OperatorSpec `json:",inline"`
+}
+
+type InsightsOperatorStatus struct {
+	OperatorStatus `json:",inline"`
+	// gatherStatus provides basic information about the last Insights data gathering.
+	// When omitted, this means no data gathering has taken place yet.
+	// +optional
+	GatherStatus GatherStatus `json:"gatherStatus,omitempty"`
+	// insightsReport provides general Insights analysis results.
+	// When omitted, this means no data gathering has taken place yet.
+	// +optional
+	InsightsReport InsightsReport `json:"insightsReport,omitempty"`
+}
+
+// gatherStatus provides information about the last known gather event.
+type GatherStatus struct {
+	// lastGatherTime is the last time when Insights data gathering finished.
+	// An empty value means that no data has been gathered yet.
+	// +optional
+	LastGatherTime metav1.Time `json:"lastGatherTime,omitempty"`
+	// lastGatherDuration is the total time taken to process
+	// all gatherers during the last gather event.
+	// +optional
+	// +kubebuilder:validation:Pattern="^(0|([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$"
+	// +kubebuilder:validation:Type=string
+	LastGatherDuration metav1.Duration `json:"lastGatherDuration,omitempty"`
+	// gatherers is a list of active gatherers (and their statuses) in the last gathering.
+	// +listType=atomic
+	// +optional
+	Gatherers []GathererStatus `json:"gatherers,omitempty"`
+}
+
+// insightsReport provides the Insights health check report based on the most
+// recently sent Insights data.
+type InsightsReport struct {
+	// downloadedAt is the time when the last Insights report was downloaded.
+	// An empty value means that there has not been any Insights report downloaded yet and
+	// it usually appears in disconnected clusters (or clusters where the Insights data gathering is disabled).
+	// +optional
+	DownloadedAt metav1.Time `json:"downloadedAt,omitempty"`
+	// healthChecks provides basic information about active Insights health checks
+	// in a cluster.
+	// +listType=atomic
+	// +optional
+	HealthChecks []HealthCheck `json:"healthChecks,omitempty"`
+}
+
+// healthCheck represents the attributes of an Insights health check.
+type HealthCheck struct {
+	// description provides a basic description of the health check.
+	// +required
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:MinLength=10
+	Description string `json:"description"`
+	// totalRisk of the health check. Indicator of the total risk posed
+	// by the detected issue; combination of impact and likelihood. The values can be from 1 to 4,
+	// and the higher the number, the more important the issue.
+	// +required
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=4
+	TotalRisk int32 `json:"totalRisk"`
+	// advisorURI provides the URL link to the Insights Advisor.
+	// +required
+	// +kubebuilder:validation:Pattern=`^https:\/\/\S+`
+	AdvisorURI string `json:"advisorURI"`
+	// state determines what the current state of the health check is.
+	// Health check is enabled by default and can be disabled
+	// by the user in the Insights advisor user interface.
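+	//
+	// A sketch of a fully populated HealthCheck, with made-up illustrative
+	// values (the URI is an example, not a guaranteed endpoint):
+	//
+	//	check := HealthCheck{
+	//		Description: "Example description of a detected issue",
+	//		TotalRisk:   2,
+	//		AdvisorURI:  "https://console.redhat.com/openshift/insights/advisor",
+	//		State:       HealthCheckEnabled,
+	//	}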
+	// +required
+	State HealthCheckState `json:"state"`
+}
+
+// healthCheckState provides information about the status of the
+// health check (for example, the health check may be marked as disabled by the user).
+// +kubebuilder:validation:Enum:=Enabled;Disabled
+type HealthCheckState string
+
+const (
+	// enabled marks the health check as enabled
+	HealthCheckEnabled HealthCheckState = "Enabled"
+	// disabled marks the health check as disabled
+	HealthCheckDisabled HealthCheckState = "Disabled"
+)
+
+// gathererStatus represents information about a particular
+// data gatherer.
+type GathererStatus struct {
+	// conditions provide details on the status of each gatherer.
+	// +listType=atomic
+	// +required
+	// +kubebuilder:validation:MinItems=1
+	Conditions []metav1.Condition `json:"conditions"`
+	// name is the name of the gatherer.
+	// +required
+	// +kubebuilder:validation:MaxLength=256
+	// +kubebuilder:validation:MinLength=5
+	Name string `json:"name"`
+	// lastGatherDuration represents the time spent gathering.
+	// +required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Pattern="^(([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$"
+	LastGatherDuration metav1.Duration `json:"lastGatherDuration"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InsightsOperatorList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsOperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	Items []InsightsOperator `json:"items"`
+} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go new file mode 100644 index 0000000000000..ce00b4b62cfcf --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -0,0 +1,82 @@ +package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubeapiservers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_20,operatorName=kube-apiserver,operatorOrdering=01
+
+// KubeAPIServer provides information to configure an operator to manage kube-apiserver.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeAPIServer struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the Kubernetes API Server
+	// +required
+	Spec KubeAPIServerSpec `json:"spec"`
+
+	// status is the most recently observed status of the Kubernetes API Server
+	// +optional
+	Status KubeAPIServerStatus `json:"status"`
+}
+
+type KubeAPIServerSpec struct {
+	StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeAPIServerStatus struct {
+	StaticPodOperatorStatus `json:",inline"`
+
+	// serviceAccountIssuers tracks the history of used service account issuers.
+	// The item without an expiration time represents the currently used service account issuer.
+	// The other items represent service account issuers that were used previously and are still being trusted.
+	// The default expiration for the items is set by the platform and it defaults to 24h.
+	// see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection
+	// +optional
+	ServiceAccountIssuers []ServiceAccountIssuerStatus `json:"serviceAccountIssuers,omitempty"`
+}
+
+type ServiceAccountIssuerStatus struct {
+	// name is the name of the service account issuer
+	// ---
+	// + This value comes from the serviceAccountIssuer field on the authentication.config.openshift.io/v1 resource.
+	// + As the authentication field is not validated, we cannot apply validation here else this may cause the controller
+	// + to error when trying to update this status field.
+	Name string `json:"name"`
+
+	// expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list
+	// of service account issuers.
+	// +optional
+	ExpirationTime *metav1.Time `json:"expirationTime,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeAPIServerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeAPIServerList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+
+	// items contains the items
+	Items []KubeAPIServer `json:"items"`
+} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go new file mode 100644 index 0000000000000..ee104aa5065c2 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -0,0 +1,67 @@ +package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubecontrollermanagers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=kube-controller-manager,operatorOrdering=01
+
+// KubeControllerManager provides information to configure an operator to manage kube-controller-manager.
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeControllerManager struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes Controller Manager + // +required + Spec KubeControllerManagerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes Controller Manager + // +optional + Status KubeControllerManagerStatus `json:"status"` +} + +type KubeControllerManagerSpec struct { + StaticPodOperatorSpec `json:",inline"` + + // useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only + // enough certificates to validate service serving certificates. + // Once set to true, it cannot be set to false. + // Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will + // only have the more secure content. + // +kubebuilder:default=false + UseMoreSecureServiceCA bool `json:"useMoreSecureServiceCA"` +} + +type KubeControllerManagerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManagerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []KubeControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go new file mode 100644 index 0000000000000..f3add49101cc6 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -0,0 +1,56 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=kubestorageversionmigrators,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/503 +// +openshift:file-pattern=cvoRunLevel=0000_40,operatorName=kube-storage-version-migrator,operatorOrdering=00 + +// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeStorageVersionMigrator struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec KubeStorageVersionMigratorSpec `json:"spec"` + // +optional + Status KubeStorageVersionMigratorStatus `json:"status"` +} + +type KubeStorageVersionMigratorSpec struct { + OperatorSpec `json:",inline"` +} + +type KubeStorageVersionMigratorStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigratorList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type KubeStorageVersionMigratorList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []KubeStorageVersionMigrator `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go new file mode 100644 index 0000000000000..88b89f81884bf --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -0,0 +1,509 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machineconfigurations,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1453 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 + +// MachineConfiguration provides information to configure an operator to manage Machine Configuration. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Machine Config Operator + // +required + Spec MachineConfigurationSpec `json:"spec"` + + // status is the most recently observed status of the Machine Config Operator + // +optional + Status MachineConfigurationStatus `json:"status"` +} + +type MachineConfigurationSpec struct { + StaticPodOperatorSpec `json:",inline"` + + // TODO(jkyros): This is where we put our knobs and dials + + // managedBootImages allows configuration for the management of boot images for machine + // resources within the cluster. This configuration allows users to select resources that should + // be updated to the latest boot images during cluster upgrades, ensuring that new machines + // always boot with the current cluster version's boot image. When omitted, no boot images + // will be updated. 
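+	//
+	// As a minimal sketch (illustrative only), opting every MAPI MachineSet
+	// into boot image updates could be expressed as:
+	//
+	//	managed := ManagedBootImages{
+	//		MachineManagers: []MachineManager{{
+	//			Resource:  MachineSets,
+	//			APIGroup:  MachineAPI,
+	//			Selection: MachineManagerSelector{Mode: All},
+	//		}},
+	//	}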
+ // +openshift:enable:FeatureGate=ManagedBootImages + // +optional + ManagedBootImages ManagedBootImages `json:"managedBootImages"` + + // nodeDisruptionPolicy allows an admin to set granular node disruption actions for + // MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow + // for less downtime when doing small configuration updates to the cluster. This configuration + // has no effect on cluster upgrades which will still incur node disruption where required. + // +openshift:enable:FeatureGate=NodeDisruptionPolicy + // +optional + NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"` +} + +type MachineConfigurationStatus struct { + // observedGeneration is the last generation change you've dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // conditions is a list of conditions and their status + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // Previously there was a StaticPodOperatorStatus here for legacy reasons. Many of the fields within + // it are no longer relevant for the MachineConfiguration CRD's functions. The following remainder + // fields were tombstoned after lifting out StaticPodOperatorStatus. To avoid conflicts with + // serialisation, the following field names may never be used again. + + // Tombstone: legacy field from StaticPodOperatorStatus + // Version string `json:"version,omitempty"` + + // Tombstone: legacy field from StaticPodOperatorStatus + // ReadyReplicas int32 `json:"readyReplicas"` + + // Tombstone: legacy field from StaticPodOperatorStatus + // Generations []GenerationStatus `json:"generations,omitempty"` + + // Tombstone: legacy field from StaticPodOperatorStatus + // LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` + + // Tombstone: legacy field from StaticPodOperatorStatus + // LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"` + + // Tombstone: legacy field from StaticPodOperatorStatus + // NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` + + // nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, + // and will be used by the Machine Config Daemon during future node updates. + // +openshift:enable:FeatureGate=NodeDisruptionPolicy + // +optional + NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineConfigurationList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineConfigurationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []MachineConfiguration `json:"items"` +} + +type ManagedBootImages struct { + // machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + // will watch for changes to this list. Only one entry is permitted per type of machine management resource. 
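+	//
+	// For example (sketch only; the label key "update-boot-images" is a
+	// hypothetical choice), a Partial selection might look like:
+	//
+	//	manager := MachineManager{
+	//		Resource: MachineSets,
+	//		APIGroup: MachineAPI,
+	//		Selection: MachineManagerSelector{
+	//			Mode: Partial,
+	//			Partial: &PartialSelector{
+	//				MachineResourceSelector: &metav1.LabelSelector{
+	//					MatchLabels: map[string]string{"update-boot-images": "true"},
+	//				},
+	//			},
+	//		},
+	//	}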
+ // +optional + // +listType=map + // +listMapKey=resource + // +listMapKey=apiGroup + MachineManagers []MachineManager `json:"machineManagers"` +} + +// MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information +// such as the resource type and the API Group of the resource. It also provides granular control via the selection field. +type MachineManager struct { + // resource is the machine management resource's type. + // The only current valid value is machinesets. + // machinesets means that the machine manager will only register resources of the kind MachineSet. + // +required + Resource MachineManagerMachineSetsResourceType `json:"resource"` + + // apiGroup is name of the APIGroup that the machine management resource belongs to. + // The only current valid value is machine.openshift.io. + // machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + // +required + APIGroup MachineManagerMachineSetsAPIGroupType `json:"apiGroup"` + + // selection allows granular control of the machine management resources that will be registered for boot image updates. + // +required + Selection MachineManagerSelector `json:"selection"` +} + +// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Partial' ? has(self.partial) : !has(self.partial)",message="Partial is required when type is partial, and forbidden otherwise" +// +union +type MachineManagerSelector struct { + // mode determines how machine managers will be selected for updates. + // Valid values are All and Partial. + // All means that every resource matched by the machine manager will be updated. + // Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + // +unionDiscriminator + // +required + Mode MachineManagerSelectorMode `json:"mode"` + + // partial provides label selector(s) that can be used to match machine management resources. + // Only permitted when mode is set to "Partial". + // +optional + Partial *PartialSelector `json:"partial,omitempty"` +} + +// PartialSelector provides label selector(s) that can be used to match machine management resources. +type PartialSelector struct { + // machineResourceSelector is a label selector that can be used to select machine resources like MachineSets. + // +required + MachineResourceSelector *metav1.LabelSelector `json:"machineResourceSelector,omitempty"` +} + +// MachineManagerSelectorMode is a string enum used in the MachineManagerSelector union discriminator. +// +kubebuilder:validation:Enum:="All";"Partial" +type MachineManagerSelectorMode string + +const ( + // All represents a configuration mode that registers all resources specified by the parent MachineManager for boot image updates. + All MachineManagerSelectorMode = "All" + + // Partial represents a configuration mode that will register resources specified by the parent MachineManager only + // if they match with the label selector. + Partial MachineManagerSelectorMode = "Partial" +) + +// MachineManagerManagedResourceType is a string enum used in the MachineManager type to describe the resource +// type to be registered. +// +kubebuilder:validation:Enum:="machinesets" +type MachineManagerMachineSetsResourceType string + +const ( + // MachineSets represent the MachineSet resource type, which manage a group of machines and belong to the Openshift machine API group. 
+	MachineSets MachineManagerMachineSetsResourceType = "machinesets"
+)
+
+// MachineManagerManagedAPIGroupType is a string enum used in the MachineManager type to describe the APIGroup
+// of the resource type being registered.
+// +kubebuilder:validation:Enum:="machine.openshift.io"
+type MachineManagerMachineSetsAPIGroupType string
+
+const (
+	// MachineAPI represents the traditional MAPI Group that a machineset may belong to.
+	// This feature only supports MAPI machinesets at this time.
+	MachineAPI MachineManagerMachineSetsAPIGroupType = "machine.openshift.io"
+)
+
+type NodeDisruptionPolicyStatus struct {
+	// clusterPolicies is a merge of cluster default and user provided node disruption policies.
+	// +optional
+	ClusterPolicies NodeDisruptionPolicyClusterStatus `json:"clusterPolicies"`
+}
+
+// NodeDisruptionPolicyConfig is the overall spec definition for files/units/sshkeys
+type NodeDisruptionPolicyConfig struct {
+	// files is a list of MachineConfig file definitions and actions to take on changes to those paths.
+	// This list supports a maximum of 50 entries.
+	// +optional
+	// +listType=map
+	// +listMapKey=path
+	// +kubebuilder:validation:MaxItems=50
+	Files []NodeDisruptionPolicySpecFile `json:"files"`
+	// units is a list of MachineConfig unit definitions and actions to take on changes to those services.
+	// This list supports a maximum of 50 entries.
+	// +optional
+	// +listType=map
+	// +listMapKey=name
+	// +kubebuilder:validation:MaxItems=50
+	Units []NodeDisruptionPolicySpecUnit `json:"units"`
+	// sshkey maps to the ignition.sshkeys field in the MachineConfig object; defining an action for this
+	// will apply to all sshkey changes in the cluster
+	// +optional
+	SSHKey NodeDisruptionPolicySpecSSHKey `json:"sshkey"`
+}
+
+// NodeDisruptionPolicyClusterStatus is the type for the status object, rendered by the controller as a
+// merge of cluster defaults and user provided policies
+type NodeDisruptionPolicyClusterStatus struct {
+	// files is a list of MachineConfig file definitions and actions to take on changes to those paths.
+	// +optional
+	// +listType=map
+	// +listMapKey=path
+	// +kubebuilder:validation:MaxItems=100
+	Files []NodeDisruptionPolicyStatusFile `json:"files,omitempty"`
+	// units is a list of MachineConfig unit definitions and actions to take on changes to those services.
+	// +optional
+	// +listType=map
+	// +listMapKey=name
+	// +kubebuilder:validation:MaxItems=100
+	Units []NodeDisruptionPolicyStatusUnit `json:"units,omitempty"`
+	// sshkey is the overall sshkey MachineConfig definition
+	// +optional
+	SSHKey NodeDisruptionPolicyStatusSSHKey `json:"sshkey,omitempty"`
+}
+
+// NodeDisruptionPolicySpecFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecFile struct {
+	// path is the location of a file being managed through a MachineConfig.
+	// The Actions in the policy will apply to changes to the file at this path.
+	// +required
+	Path string `json:"path"`
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusFile struct {
+	// path is the location of a file being managed through a MachineConfig.
+	// The Actions in the policy will apply to changes to the file at this path.
+	// +required
+	Path string `json:"path"`
+	// actions represents the series of commands to be executed on changes to the file at
+	// the corresponding file path. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// NodeDisruptionPolicySpecUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecUnit struct {
+	// name represents the service name of a systemd service managed through a MachineConfig.
+	// Actions specified will be applied for changes to the named service.
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	Name NodeDisruptionPolicyServiceName `json:"name"`
+
+	// actions represents the series of commands to be executed on changes to the service with
+	// the corresponding name. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
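+	// For example (illustrative only; chronyd.service is a hypothetical target),
+	// a unit policy might reload the systemd manager and then restart the service:
+	//
+	//	[]NodeDisruptionPolicySpecAction{
+	//		{Type: DaemonReloadSpecAction},
+	//		{Type: RestartSpecAction, Restart: &RestartService{ServiceName: "chronyd.service"}},
+	//	}
+	//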
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusUnit struct {
+	// name represents the service name of a systemd service managed through a MachineConfig.
+	// Actions specified will be applied for changes to the named service.
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	Name NodeDisruptionPolicyServiceName `json:"name"`
+
+	// actions represents the series of commands to be executed on changes to the service with
+	// the corresponding name. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// NodeDisruptionPolicySpecSSHKey describes the actions to take for any SSHKey change and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecSSHKey struct {
+	// actions represents the series of commands to be executed on changes to the cluster's
+	// sshkey configuration. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusSSHKey describes the actions to take for any SSHKey change and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusSSHKey struct {
+	// actions represents the series of commands to be executed on changes to the cluster's
+	// sshkey configuration. Actions will be applied in the order that
+	// they are set in this list. If there are other incoming changes to other MachineConfig
+	// entries in the same update that require a reboot, the reboot will supersede these actions.
+	// Valid actions are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+	// This list supports a maximum of 10 entries.
+	// +required
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+	// +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+	Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Reload' ? has(self.reload) : !has(self.reload)",message="reload is required when type is Reload, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Restart' ? has(self.restart) : !has(self.restart)",message="restart is required when type is Restart, and forbidden otherwise"
+// +union
+type NodeDisruptionPolicySpecAction struct {
+	// type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed.
+	// Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None.
+	// Reload/Restart require a corresponding service target specified in the reload/restart field.
+	// Other values require no further configuration.
+	// +unionDiscriminator
+	// +required
+	Type NodeDisruptionPolicySpecActionType `json:"type"`
+	// reload specifies the service to reload, only valid if type is Reload
+	// +optional
+	Reload *ReloadService `json:"reload,omitempty"`
+	// restart specifies the service to restart, only valid if type is Restart
+	// +optional
+	Restart *RestartService `json:"restart,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Reload' ? has(self.reload) : !has(self.reload)",message="reload is required when type is Reload, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Restart' ? has(self.restart) : !has(self.restart)",message="restart is required when type is Restart, and forbidden otherwise"
+// +union
+type NodeDisruptionPolicyStatusAction struct {
+	// type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed.
+	// Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special.
+	// Reload/Restart require a corresponding service target specified in the reload/restart field.
+	// Other values require no further configuration.
+	// +unionDiscriminator
+	// +required
+	Type NodeDisruptionPolicyStatusActionType `json:"type"`
+	// reload specifies the service to reload, only valid if type is Reload
+	// +optional
+	Reload *ReloadService `json:"reload,omitempty"`
+	// restart specifies the service to restart, only valid if type is Restart
+	// +optional
+	Restart *RestartService `json:"restart,omitempty"`
+}
+
+// ReloadService allows the user to specify the services to be reloaded
+type ReloadService struct {
+	// serviceName is the full name (e.g. crio.service) of the service to be reloaded
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"`
+}
+
+// RestartService allows the user to specify the services to be restarted
+type RestartService struct {
+	// serviceName is the full name (e.g. crio.service) of the service to be restarted
+	// Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+	// ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+	// ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+	// +required
+	ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"`
+}
+
+// NodeDisruptionPolicySpecActionType is a string enum used in a NodeDisruptionPolicySpecAction object. It describes an action to be performed.
+// +kubebuilder:validation:Enum:="Reboot";"Drain";"Reload";"Restart";"DaemonReload";"None"
+type NodeDisruptionPolicySpecActionType string
+
+// +kubebuilder:validation:XValidation:rule=`self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')`, message="Invalid ${SERVICETYPE} in service name. Expected format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\"."
+// +kubebuilder:validation:XValidation:rule=`self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')`, message="Invalid ${NAME} in service name. Expected format is ${NAME}${SERVICETYPE}, where ${NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\\\""
+// +kubebuilder:validation:MaxLength=255
+type NodeDisruptionPolicyServiceName string
+
+const (
+	// Reboot represents an action that will cause nodes to be rebooted. This is the default action by the MCO
+	// if a reboot policy is not found for a change/update being performed by the MCO.
+	RebootSpecAction NodeDisruptionPolicySpecActionType = "Reboot"
+
+	// Drain represents an action that will cause nodes to be drained of their workloads.
+	DrainSpecAction NodeDisruptionPolicySpecActionType = "Drain"
+
+	// Reload represents an action that will cause nodes to reload the service specified in the reload field.
+	ReloadSpecAction NodeDisruptionPolicySpecActionType = "Reload"
+
+	// Restart represents an action that will cause nodes to restart the service specified in the restart field.
+	RestartSpecAction NodeDisruptionPolicySpecActionType = "Restart"
+
+	// DaemonReload represents an action that will cause nodes to reload the systemd manager configuration,
+	// the equivalent of running systemctl daemon-reload.
+	DaemonReloadSpecAction NodeDisruptionPolicySpecActionType = "DaemonReload"
+
+	// None represents an action for which no handling is required by the MCO.
+	NoneSpecAction NodeDisruptionPolicySpecActionType = "None"
+)
+
+// NodeDisruptionPolicyStatusActionType is a string enum used in a NodeDisruptionPolicyStatusAction object. It describes an action to be performed.
+// The key difference of this object from NodeDisruptionPolicySpecActionType is that there is an additional SpecialStatusAction value in this enum. This will only be
+// used by the MCO's controller to indicate some internal actions. They are not part of the NodeDisruptionPolicyConfig object and cannot be set by the user.
+// +kubebuilder:validation:Enum:="Reboot";"Drain";"Reload";"Restart";"DaemonReload";"None";"Special"
+type NodeDisruptionPolicyStatusActionType string
+
+const (
+	// Reboot represents an action that will cause nodes to be rebooted. This is the default action by the MCO
+	// if a reboot policy is not found for a change/update being performed by the MCO.
+	RebootStatusAction NodeDisruptionPolicyStatusActionType = "Reboot"
+
+	// Drain represents an action that will cause nodes to be drained of their workloads.
+	DrainStatusAction NodeDisruptionPolicyStatusActionType = "Drain"
+
+	// Reload represents an action that will cause nodes to reload the service specified in the reload field.
+	ReloadStatusAction NodeDisruptionPolicyStatusActionType = "Reload"
+
+	// Restart represents an action that will cause nodes to restart the service specified in the restart field.
+	RestartStatusAction NodeDisruptionPolicyStatusActionType = "Restart"
+
+	// DaemonReload represents an action that will cause nodes to reload the systemd manager configuration,
+	// the equivalent of running systemctl daemon-reload.
+	DaemonReloadStatusAction NodeDisruptionPolicyStatusActionType = "DaemonReload"
+
+	// None represents an action for which no handling is required by the MCO.
+	NoneStatusAction NodeDisruptionPolicyStatusActionType = "None"
+
+	// Special represents an action that is internal to the MCO, and is not allowed in user defined NodeDisruption policies.
+	SpecialStatusAction NodeDisruptionPolicyStatusActionType = "Special"
+)
+
+// These strings will be used for MachineConfiguration Status conditions.
+const (
+	// MachineConfigurationBootImageUpdateDegraded means that the MCO ran into an error while reconciling boot images. This
+	// will cause the clusteroperators.config.openshift.io/machine-config to degrade. This condition will indicate the cause
+	// of the degrade, the progress of the update and the generation of the boot images configmap that it degraded on.
+	MachineConfigurationBootImageUpdateDegraded string = "BootImageUpdateDegraded"
+
+	// MachineConfigurationBootImageUpdateProgressing means that the MCO is in the process of reconciling boot images. This
+	// will cause the clusteroperators.config.openshift.io/machine-config to be in a Progressing state. This condition will
+	// indicate the progress of the update and the generation of the boot images configmap that triggered this update.
+ MachineConfigurationBootImageUpdateProgressing string = "BootImageUpdateProgressing" +) diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go new file mode 100644 index 0000000000000..ba2f3b4712f2f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -0,0 +1,870 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=networks,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=network,operatorOrdering=01 + +// Network describes the cluster's desired network configuration. It is +// consumed by the cluster-network-operator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:openapi-gen=true +// +openshift:compatibility-gen:level=1 +type Network struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec,omitempty"` + Status NetworkStatus `json:"status,omitempty"` +} + +// NetworkStatus is detailed operator status, which is distilled +// up to the Network clusteroperator object. +type NetworkStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkList contains a list of Network configurations +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Network `json:"items"` +} + +// NetworkSpec is the top-level network configuration object. 
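+//
+// A minimal illustrative spec (example values only; the CIDRs shown are common
+// installer defaults, not requirements):
+//
+//	NetworkSpec{
+//		ClusterNetwork: []ClusterNetworkEntry{{CIDR: "10.128.0.0/14", HostPrefix: 23}},
+//		ServiceNetwork: []string{"172.30.0.0/16"},
+//		DefaultNetwork: DefaultNetworkDefinition{Type: NetworkTypeOVNKubernetes},
+//	}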
+// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteAdvertisements,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available"
+type NetworkSpec struct {
+	OperatorSpec `json:",inline"`
+
+	// clusterNetwork is the IP address pool to use for pod IPs.
+	// Some network providers support multiple ClusterNetworks.
+	// Others only support one. This is equivalent to the cluster-cidr.
+	// +listType=atomic
+	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+	// serviceNetwork is the IP address pool to use for Service IPs.
+	// Currently, all existing network providers only support a single value
+	// here, but this is an array to allow for growth.
+	// +listType=atomic
+	ServiceNetwork []string `json:"serviceNetwork"`
+
+	// defaultNetwork is the "default" network that all pods will receive
+	DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"`
+
+	// additionalNetworks is a list of extra networks to make available to pods
+	// when multiple networks are enabled.
+	// +listType=map
+	// +listMapKey=name
+	AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
+
+	// disableMultiNetwork specifies whether or not multiple pod network
+	// support should be disabled. If unset, this property defaults to
+	// 'false' and multiple network support is enabled.
+	DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
+
+	// useMultiNetworkPolicy enables a controller which allows for
+	// MultiNetworkPolicy objects to be used on additional networks as
+	// created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy
+	// objects, but NetworkPolicy objects only apply to the primary interface.
+	// With MultiNetworkPolicy, you can control the traffic that a pod can receive
+	// over the secondary interfaces. If unset, this property defaults to 'false'
+	// and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is
+	// 'true' then the value of this field is ignored.
+	UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"`
+
+	// deployKubeProxy specifies whether or not a standalone kube-proxy should
+	// be deployed by the operator. Some network providers include kube-proxy
+	// or similar functionality. If unset, the plugin will attempt to select
+	// the correct value, which is false when ovn-kubernetes is used and true
+	// otherwise.
+	// +optional
+	DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
+
+	// disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck
+	// CRs from a test pod to every node, apiserver and LB should be disabled.
+	// If unset, this property defaults to 'false' and network diagnostics is enabled.
+	// Setting this to 'true' would reduce the additional load of the pods performing the checks.
+	// +optional
+	// +kubebuilder:default:=false
+	DisableNetworkDiagnostics bool `json:"disableNetworkDiagnostics"`
+
+	// kubeProxyConfig configures the desired proxy configuration, if
+	// deployKubeProxy is true. If not specified, sensible defaults will be chosen by
+	// OpenShift directly.
+	KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
+
+	// exportNetworkFlows enables and configures the export of network flow metadata from the pod network
+	// by using protocols NetFlow, SFlow or IPFIX. Currently only supported on the OVN-Kubernetes plugin.
+	// If unset, flows will not be exported to any collector.
+	// +optional
+	ExportNetworkFlows *ExportNetworkFlows `json:"exportNetworkFlows,omitempty"`
+
+	// migration enables and configures cluster network migration, for network changes
+	// that cannot be made instantly.
+	// +optional
+	Migration *NetworkMigration `json:"migration,omitempty"`
+
+	// additionalRoutingCapabilities describes components and relevant
+	// configuration providing additional routing capabilities. When set, it
+	// enables such components and the usage of the routing capabilities they
+	// provide for the machine network. Operators requiring these capabilities,
+	// such as the MetalLB operator, may rely on or automatically set this
+	// attribute. Network plugins may leverage advanced routing
+	// capabilities acquired through the enablement of these components but may
+	// require specific configuration on their side to do so; refer to their
+	// respective documentation and configuration options.
+	// +openshift:enable:FeatureGate=AdditionalRoutingCapabilities
+	// +optional
+	AdditionalRoutingCapabilities *AdditionalRoutingCapabilities `json:"additionalRoutingCapabilities,omitempty"`
+}
+
+// NetworkMigrationMode is an enumeration of the possible modes of the network migration.
+// Valid values are "Live", "Offline" and omitted.
+// DEPRECATED: network type migration is no longer supported.
+// +kubebuilder:validation:Enum:=Live;Offline;""
+type NetworkMigrationMode string
+
+const (
+	// A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration.
+	// DEPRECATED: network type migration is no longer supported.
+	LiveNetworkMigrationMode NetworkMigrationMode = "Live"
+	// An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration.
+	// DEPRECATED: network type migration is no longer supported.
+	OfflineNetworkMigrationMode NetworkMigrationMode = "Offline"
+)
+
+// NetworkMigration represents the cluster network migration configuration.
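+//
+// For illustration only (hypothetical MTU values), an MTU migration might be
+// expressed as:
+//
+//	NetworkMigration{
+//		MTU: &MTUMigration{
+//			Network: &MTUMigrationValues{To: &networkMTU}, // e.g. networkMTU := uint32(8900)
+//			Machine: &MTUMigrationValues{To: &machineMTU}, // e.g. machineMTU := uint32(9000)
+//		},
+//	}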
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration"
+type NetworkMigration struct {
+	// mtu contains the MTU migration configuration. Set this to allow changing
+	// the MTU values for the default network. If unset, the operation of
+	// changing the MTU for the default network will be rejected.
+	// +optional
+	MTU *MTUMigration `json:"mtu,omitempty"`
+
+	// networkType was previously used when changing the default network type.
+	// DEPRECATED: network type migration is no longer supported, and setting
+	// this to a non-empty value will result in the network operator rejecting
+	// the configuration.
+	// +optional
+	NetworkType string `json:"networkType,omitempty"`
+
+	// features was previously used to configure which network plugin features
+	// would be migrated in a network type migration.
+	// DEPRECATED: network type migration is no longer supported, and setting
+	// this to a non-empty value will result in the network operator rejecting
+	// the configuration.
+	// +optional
+	Features *FeaturesMigration `json:"features,omitempty"`
+
+	// mode indicates the mode of network type migration.
+	// DEPRECATED: network type migration is no longer supported, and setting
+	// this to a non-empty value will result in the network operator rejecting
+	// the configuration.
+	// +optional
+	Mode NetworkMigrationMode `json:"mode,omitempty"`
+}
+
+type FeaturesMigration struct {
+	// egressIP specified whether or not the Egress IP configuration was migrated.
+	// DEPRECATED: network type migration is no longer supported.
+	// +optional
+	// +kubebuilder:default:=true
+	EgressIP bool `json:"egressIP,omitempty"`
+	// egressFirewall specified whether or not the Egress Firewall configuration was migrated.
+	// DEPRECATED: network type migration is no longer supported.
+	// +optional
+	// +kubebuilder:default:=true
+	EgressFirewall bool `json:"egressFirewall,omitempty"`
+	// multicast specified whether or not the multicast configuration was migrated.
+	// DEPRECATED: network type migration is no longer supported.
+	// +optional
+	// +kubebuilder:default:=true
+	Multicast bool `json:"multicast,omitempty"`
+}
+
+// MTUMigration contains information about MTU migration.
+type MTUMigration struct {
+	// network contains information about MTU migration for the default network.
+	// Migrations are only allowed to MTU values lower than the machine's uplink
+	// MTU by the minimum appropriate offset.
+	// +optional
+	Network *MTUMigrationValues `json:"network,omitempty"`
+
+	// machine contains MTU migration configuration for the machine's uplink.
+	// Needs to be migrated along with the default network MTU unless the
+	// current uplink MTU already accommodates the default network MTU.
+	// +optional
+	Machine *MTUMigrationValues `json:"machine,omitempty"`
+}
+
+// MTUMigrationValues contains the values for an MTU migration.
+type MTUMigrationValues struct {
+	// to is the MTU to migrate to.
+	// +kubebuilder:validation:Minimum=0
+	To *uint32 `json:"to"`
+
+	// from is the MTU to migrate from.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	From *uint32 `json:"from,omitempty"`
+}
+
+// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
+// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If
+// the HostPrefix field is not used by the plugin, it can be left unset.
+// Not all network providers support multiple ClusterNetworks.
+type ClusterNetworkEntry struct {
+	CIDR string `json:"cidr"`
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// DefaultNetworkDefinition represents a single network plugin's configuration.
+// type must be specified, along with exactly one "Config" that matches the type.
+type DefaultNetworkDefinition struct {
+	// type is the type of network
+	// All NetworkTypes are supported except for NetworkTypeRaw
+	Type NetworkType `json:"type"`
+
+	// openshiftSDNConfig was previously used to configure the openshift-sdn plugin.
+	// DEPRECATED: OpenShift SDN is no longer supported.
+	// +optional
+	OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
+
+	// ovnKubernetesConfig configures the ovn-kubernetes plugin.
+	// +optional
+	OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
+}
+
+// SimpleMacvlanConfig contains configurations for macvlan interface.
+type SimpleMacvlanConfig struct {
+	// master is the host interface to create the macvlan interface from.
+	// If not specified, it will be the default route interface.
+	// +optional
+	Master string `json:"master,omitempty"`
+
+	// ipamConfig configures the IPAM module to be used for IP Address Management (IPAM).
+	// +optional
+	IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
+
+	// mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
+	// +optional
+	Mode MacvlanMode `json:"mode,omitempty"`
+
+	// mtu is the mtu to use for the macvlan interface. If unset, the host's
+	// kernel will select the value.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU uint32 `json:"mtu,omitempty"`
+}
+
+// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
+type StaticIPAMAddresses struct {
+	// address is the IP address in CIDR format
+	// +optional
+	Address string `json:"address"`
+	// gateway is the IP inside of the subnet to designate as the gateway
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
+type StaticIPAMRoutes struct {
+	// destination is the IP route destination
+	Destination string `json:"destination"`
+	// gateway is the route's next-hop IP address
+	// If unset, a default gateway is assumed (as determined by the CNI plugin).
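+	// For illustration (example addresses only): {Destination: "0.0.0.0/0",
+	// Gateway: "192.168.1.1"} routes all traffic through 192.168.1.1.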
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMDNS provides DNS related information for static IPAM
+type StaticIPAMDNS struct {
+	// nameservers lists the DNS servers to use for IP lookup.
+	// +optional
+	// +listType=atomic
+	Nameservers []string `json:"nameservers,omitempty"`
+	// domain configures the local domain used for short hostname lookups.
+	// +optional
+	Domain string `json:"domain,omitempty"`
+	// search configures priority ordered search domains for short hostname lookups
+	// +optional
+	// +listType=atomic
+	Search []string `json:"search,omitempty"`
+}
+
+// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
+type StaticIPAMConfig struct {
+	// addresses configures IP address for the interface
+	// +optional
+	// +listType=atomic
+	Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
+	// routes configures IP routes for the interface
+	// +optional
+	// +listType=atomic
+	Routes []StaticIPAMRoutes `json:"routes,omitempty"`
+	// dns configures DNS for the interface
+	// +optional
+	DNS *StaticIPAMDNS `json:"dns,omitempty"`
+}
+
+// IPAMConfig contains configurations for IPAM (IP Address Management)
+type IPAMConfig struct {
+	// type is the type of IPAM module that will be used for IP Address Management (IPAM).
+	// The supported values are IPAMTypeDHCP, IPAMTypeStatic
+	Type IPAMType `json:"type"`
+
+	// staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic
+	// +optional
+	StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
+}
+
+// AdditionalNetworkDefinition configures an extra network that is available but not
+// created by default. Instead, pods must request them by name.
+// type must be specified, along with exactly one "Config" that matches the type.
+type AdditionalNetworkDefinition struct {
+	// type is the type of network
+	// The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
+	Type NetworkType `json:"type"`
+
+	// name is the name of the network. This will be populated in the resulting CRD
+	// This must be unique.
+	// +required
+	Name string `json:"name"`
+
+	// namespace is the namespace of the network. This will be populated in the resulting CRD
+	// If not given, the network will be created in the default namespace.
+	Namespace string `json:"namespace,omitempty"`
+
+	// rawCNIConfig is the raw CNI configuration json to create in the
+	// NetworkAttachmentDefinition CRD
+	RawCNIConfig string `json:"rawCNIConfig,omitempty"`
+
+	// simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan
+	// +optional
+	SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
+}
+
+// OpenShiftSDNConfig was used to configure the OpenShift SDN plugin. It is no longer used.
+type OpenShiftSDNConfig struct {
+	// mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
+	Mode SDNMode `json:"mode"`
+
+	// vxlanPort is the port to use for all vxlan packets. The default is 4789.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	VXLANPort *uint32 `json:"vxlanPort,omitempty"`
+
+	// mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset.
+	// This must be 50 bytes smaller than the machine's uplink.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+
+	// useExternalOpenvswitch used to control whether the operator would deploy an OVS
+	// DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always
+	// run as a system service, and this flag is ignored.
+	// +optional
+	UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
+
+	// enableUnidling controls whether or not the service proxy will support idling
+	// and unidling of services. By default, unidling is enabled.
+	EnableUnidling *bool `json:"enableUnidling,omitempty"`
+}
+
+// OVNKubernetesConfig contains the configuration parameters for networks
+// using the ovn-kubernetes network project
+type OVNKubernetesConfig struct {
+	// mtu is the MTU to use for the tunnel interface. This must be 100
+	// bytes smaller than the uplink mtu.
+	// Default is 1400
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+	// genevePort is the UDP port to be used by Geneve encapsulation.
+	// Default is 6081
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	GenevePort *uint32 `json:"genevePort,omitempty"`
+	// hybridOverlayConfig configures an additional overlay network for peers that are
+	// not using OVN.
+	// +optional
+	HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
+	// ipsecConfig enables and configures IPsec for pods on the pod network within the
+	// cluster.
+	// +optional
+	// +kubebuilder:default={"mode": "Disabled"}
+	// +default={"mode": "Disabled"}
+	IPsecConfig *IPsecConfig `json:"ipsecConfig,omitempty"`
+	// policyAuditConfig is the configuration for network policy audit events. If unset,
+	// reported defaults are used.
+	// +optional
+	PolicyAuditConfig *PolicyAuditConfig `json:"policyAuditConfig,omitempty"`
+	// gatewayConfig holds the configuration for node gateway options.
+	// +optional
+	GatewayConfig *GatewayConfig `json:"gatewayConfig,omitempty"`
+	// v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes. The value cannot be changed
+	// after installation.
+	// Default is 100.64.0.0/16
+	// +optional
+	V4InternalSubnet string `json:"v4InternalSubnet,omitempty"`
+	// v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes. The value cannot be changed
+	// after installation.
+	// Default is fd98::/48
+	// +optional
+	V6InternalSubnet string `json:"v6InternalSubnet,omitempty"`
+	// egressIPConfig holds the configuration for EgressIP options.
+	// +optional
+	EgressIPConfig EgressIPConfig `json:"egressIPConfig,omitempty"`
+	// ipv4 allows users to configure IP settings for IPv4 connections. When omitted,
+	// this means no opinion and the default configuration is used. Check individual
+	// fields within ipv4 for details of default values.
+	// +optional
+	IPv4 *IPv4OVNKubernetesConfig `json:"ipv4,omitempty"`
+	// ipv6 allows users to configure IP settings for IPv6 connections. When omitted,
+	// this means no opinion and the default configuration is used. Check individual
+	// fields within ipv6 for details of default values.
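+	// For illustration (fd99::/48 is a hypothetical example value):
+	// &IPv6OVNKubernetesConfig{InternalJoinSubnet: "fd99::/48"} would move the
+	// v6 join subnet away from its fd98::/48 default.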
+	// +optional
+	IPv6 *IPv6OVNKubernetesConfig `json:"ipv6,omitempty"`
+
+	// routeAdvertisements determines if the functionality to advertise cluster
+	// network routes through a dynamic routing protocol, such as BGP, is
+	// enabled or not. This functionality is configured through the
+	// ovn-kubernetes RouteAdvertisements CRD. Requires the 'FRR' routing
+	// capability provider to be enabled as an additional routing capability.
+	// Allowed values are "Enabled", "Disabled" and omitted. When omitted, this
+	// means the user has no opinion and the platform is left to choose
+	// reasonable defaults. These defaults are subject to change over time. The
+	// current default is "Disabled".
+	// +openshift:enable:FeatureGate=RouteAdvertisements
+	// +optional
+	RouteAdvertisements RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"`
+}
+
+type IPv4OVNKubernetesConfig struct {
+	// internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally
+	// by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect
+	// architecture that connects the cluster routers on each node together to enable
+	// east west traffic. The subnet chosen should not overlap with other networks
+	// specified for OVN-Kubernetes as well as other networks used on the host.
+	// The value cannot be changed after installation.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default which is subject to change over time.
+	// The current default subnet is 100.88.0.0/16
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The value must be in proper IPV4 CIDR format
+	// +kubebuilder:validation:MaxLength=18
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+	// +optional
+	InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
+	// internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes. The value cannot be changed
+	// after installation.
+	// The current default value is 100.64.0.0/16
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The value must be in proper IPV4 CIDR format
+	// +kubebuilder:validation:MaxLength=18
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+	// +optional
+	InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
+}
+
+type IPv6OVNKubernetesConfig struct {
+	// internalTransitSwitchSubnet is a v6 subnet in IPV6 CIDR format used internally
+	// by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect
+	// architecture that connects the cluster routers on each node together to enable
+	// east west traffic. The subnet chosen should not overlap with other networks
+	// specified for OVN-Kubernetes as well as other networks used on the host.
+	// The value cannot be changed after installation.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable
+	// default which is subject to change over time.
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The current default subnet is fd97::/64
+	// The value must be in proper IPV6 CIDR format
+	// Note that IPV6 dual addresses are not permitted
+	// +kubebuilder:validation:MaxLength=48
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+	// +optional
+	InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
+	// internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the
+	// default one is already being used by something else. It must not overlap with
+	// any other subnet being used by OpenShift or by the node network. The size of the
+	// subnet must be larger than the number of nodes. The value cannot be changed
+	// after installation.
+	// The subnet must be large enough to accommodate one IP per node in your cluster
+	// The current default value is fd98::/48
+	// The value must be in proper IPV6 CIDR format
+	// Note that IPV6 dual addresses are not permitted
+	// +kubebuilder:validation:MaxLength=48
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+	// +optional
+	InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
+}
+
+type HybridOverlayConfig struct {
+	// hybridClusterNetwork defines a network space given to nodes on an additional overlay network.
+	// +listType=atomic
+	HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
+	// hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network.
+	// Default is 4789
+	// +optional
+	HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:rule="self == oldSelf || has(self.mode)",message="ipsecConfig.mode is required"
+type IPsecConfig struct {
+	// mode defines the behaviour of the ipsec configuration within the platform.
+	// Valid values are `Disabled`, `External` and `Full`.
+	// When 'Disabled', ipsec will not be enabled at the node level.
+	// When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters.
+	// This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator.
+	// When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured.
+	// Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays),
+	// this is left to the user to configure.
+	// +kubebuilder:validation:Enum=Disabled;External;Full
+	// +optional
+	Mode IPsecMode `json:"mode,omitempty"`
+}
+
+type IPForwardingMode string
+
+const (
+	// IPForwardingRestricted limits the IP forwarding on OVN-Kube managed interfaces (br-ex, br-ex1) to only required
+	// service and other k8s related traffic
+	IPForwardingRestricted IPForwardingMode = "Restricted"
+
+	// IPForwardingGlobal allows all IP traffic to be forwarded across OVN-Kube managed interfaces
+	IPForwardingGlobal IPForwardingMode = "Global"
+)
+
+// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
+type GatewayConfig struct {
+	// routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port
+	// into the host before sending it out. If this is not set, traffic will always egress directly
+	// from OVN to outside without touching the host stack. Setting this to true means hardware
+	// offload will not be supported. Default is false if GatewayConfig is specified.
+	// +kubebuilder:default:=false
+	// +optional
+	RoutingViaHost bool `json:"routingViaHost,omitempty"`
+	// ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex).
+	// By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other
+	// IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across
+	// OVN-Kubernetes managed interfaces, then set this field to "Global".
+	// The supported values are "Restricted" and "Global".
+	// +optional
+	IPForwarding IPForwardingMode `json:"ipForwarding,omitempty"`
+	// ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default
+	// configuration is used. Check individual fields within ipv4 for details of default values.
+	// +optional
+	IPv4 IPv4GatewayConfig `json:"ipv4,omitempty"`
+	// ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default
+	// configuration is used. Check individual fields within ipv6 for details of default values.
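+	// For illustration (example value only, matching the documented default):
+	// IPv6GatewayConfig{InternalMasqueradeSubnet: "fd69::/125"}.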
+	// +optional
+	IPv6 IPv6GatewayConfig `json:"ipv6,omitempty"`
+}
+
+// IPv4GatewayConfig holds the configuration parameters for IPv4 connections in the GatewayConfig for OVN-Kubernetes
+type IPv4GatewayConfig struct {
+	// internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by
+	// ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these
+	// addresses, as well as the shared gateway bridge interface. The values can be changed after
+	// installation. The subnet chosen should not overlap with other networks specified for
+	// OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must
+	// be large enough to accommodate 6 IPs (maximum prefix length /29).
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+	// The current default subnet is 169.254.169.0/29
+	// The value must be in proper IPV4 CIDR format
+	// +kubebuilder:validation:MaxLength=18
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 29",message="subnet must be in the range /0 to /29 inclusive"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+	// +optional
+	InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
+}
+
+// IPv6GatewayConfig holds the configuration parameters for IPv6 connections in the GatewayConfig for OVN-Kubernetes
+type IPv6GatewayConfig struct {
+	// internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by
+	// ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these
+	// addresses, as well as the shared gateway bridge interface. The values can be changed after
+	// installation. The subnet chosen should not overlap with other networks specified for
+	// OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must
+	// be large enough to accommodate 6 IPs (maximum prefix length /125).
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+	// The current default subnet is fd69::/125
+	// Note that IPV6 dual addresses are not permitted
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+	// +optional
+	InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
+}
+
+type ExportNetworkFlows struct {
+	// netFlow defines the NetFlow configuration.
+	// +optional
+	NetFlow *NetFlowConfig `json:"netFlow,omitempty"`
+	// sFlow defines the SFlow configuration.
+	// +optional
+	SFlow *SFlowConfig `json:"sFlow,omitempty"`
+	// ipfix defines IPFIX configuration.
+	// +optional
+	IPFIX *IPFIXConfig `json:"ipfix,omitempty"`
+}
+
+type NetFlowConfig struct {
+	// collectors is a list of NetFlow collectors that will consume the flow data exported from OVS.
+	// It is a list of strings formatted as ip:port with a maximum of ten items
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	// +listType=atomic
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type SFlowConfig struct {
+	// collectors is a list of strings formatted as ip:port with a maximum of ten items
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	// +listType=atomic
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type IPFIXConfig struct {
+	// collectors is a list of strings formatted as ip:port with a maximum of ten items
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	// +listType=atomic
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+type IPPort string
+
+type PolicyAuditConfig struct {
+	// rateLimit is the approximate maximum number of messages to generate per-second per-node. If
+	// unset the default of 20 msg/sec is used.
+	// +kubebuilder:default=20
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	RateLimit *uint32 `json:"rateLimit,omitempty"`
+
+	// maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs
+	// Units are in MB and the default is 50 MB
+	// +kubebuilder:default=50
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	MaxFileSize *uint32 `json:"maxFileSize,omitempty"`
+
+	// maxLogFiles specifies the maximum number of ACL_audit log files that can be present.
+	// +kubebuilder:default=5
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	MaxLogFiles *int32 `json:"maxLogFiles,omitempty"`
+
+	// destination is the location for policy log messages.
+	// Regardless of this config, persistent logs will always be dumped to the host
+	// at /var/log/ovn/. Additionally, syslog output may be configured as follows.
+	// Valid values are:
+	// - "libc" -> to use the libc syslog() function of the host node's journald process
+	// - "udp:host:port" -> for sending syslog over UDP
+	// - "unix:file" -> for using the UNIX domain socket directly
+	// - "null" -> to discard all messages logged to syslog
+	// The default is "null"
+	// +kubebuilder:default=null
+	// +kubebuilder:pattern='^libc$|^null$|^udp:(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):([0-9]){0,5}$|^unix:(\/[^\/ ]*)+([^\/\s])$'
+	// +optional
+	Destination string `json:"destination,omitempty"`
+
+	// syslogFacility is the RFC5424 facility for generated messages, e.g. "kern". Default is "local0"
+	// +kubebuilder:default=local0
+	// +kubebuilder:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;clock;ftp;ntp;audit;alert;clock2;local0;local1;local2;local3;local4;local5;local6;local7
+	// +optional
+	SyslogFacility string `json:"syslogFacility,omitempty"`
+}
+
+// NetworkType describes the network plugin type to configure
+type NetworkType string
+
+// ProxyArgumentList is a list of arguments to pass to the kubeproxy process
+// +listType=atomic
+type ProxyArgumentList []string
+
+// ProxyConfig defines the configuration knobs for kubeproxy
+// All of these are optional and have sensible defaults
+type ProxyConfig struct {
+	// An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted
+	// in large clusters for performance reasons, but this is no longer necessary, and there is no reason
+	// to change this from the default value.
+	// Default: 30s
+	IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"`
+
+	// The address to "bind" on
+	// Defaults to 0.0.0.0
+	BindAddress string `json:"bindAddress,omitempty"`
+
+	// Any additional arguments to pass to the kubeproxy process
+	ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"`
+}
+
+// EgressIPConfig defines the configuration knobs for egressip
+type EgressIPConfig struct {
+	// reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds.
+	// If the EgressIP node cannot be reached within this timeout, the node is declared down.
+	// Setting a large value may cause the EgressIP feature to react slowly to node changes.
+	// In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable.
+	// When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default is 1 second.
+	// A value of 0 disables the EgressIP node's reachability check.
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=60
+	// +optional
+	ReachabilityTotalTimeoutSeconds *uint32 `json:"reachabilityTotalTimeoutSeconds,omitempty"`
+}
+
+const (
+	// NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured.
+	// DEPRECATED: OpenShift SDN is no longer supported
+	NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN"
+
+	// NetworkTypeOVNKubernetes means the ovn-kubernetes plugin will be configured.
+	NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes"
+
+	// NetworkTypeRaw
+	NetworkTypeRaw NetworkType = "Raw"
+
+	// NetworkTypeSimpleMacvlan
+	NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan"
+)
+
+// SDNMode is the mode the openshift-sdn plugin is in.
+// DEPRECATED: OpenShift SDN is no longer supported
+type SDNMode string
+
+const (
+	// SDNModeSubnet is a simple mode that offers no isolation between pods
+	// DEPRECATED: OpenShift SDN is no longer supported
+	SDNModeSubnet SDNMode = "Subnet"
+
+	// SDNModeMultitenant is a special "multitenant" mode that offers limited
+	// isolation configuration between namespaces
+	// DEPRECATED: OpenShift SDN is no longer supported
+	SDNModeMultitenant SDNMode = "Multitenant"
+
+	// SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows
+	// for sophisticated network isolation and segmenting. This is the default.
+	// DEPRECATED: OpenShift SDN is no longer supported
+	SDNModeNetworkPolicy SDNMode = "NetworkPolicy"
+)
+
+// MacvlanMode is the mode of macvlan. The values are lowercase to match the CNI plugin
+// config values. See "man ip-link" for details.
+type MacvlanMode string
+
+const (
+	// MacvlanModeBridge is the macvlan with thin bridge function.
+ MacvlanModeBridge MacvlanMode = "Bridge"
+ // MacvlanModePrivate prevents endpoints on the same lower device from
+ // communicating with each other.
+ MacvlanModePrivate MacvlanMode = "Private"
+ // MacvlanModeVEPA is used with a Virtual Ethernet Port Aggregator
+ // (802.1qbg) switch
+ MacvlanModeVEPA MacvlanMode = "VEPA"
+ // MacvlanModePassthru attaches a single endpoint directly to the lower device.
+ MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// IPAMType describes the IP address management type to configure
+type IPAMType string
+
+const (
+ // IPAMTypeDHCP uses DHCP for IP management
+ IPAMTypeDHCP IPAMType = "DHCP"
+ // IPAMTypeStatic uses static IP
+ IPAMTypeStatic IPAMType = "Static"
+)
+
+// IPsecMode enumerates the modes for IPsec configuration
+type IPsecMode string
+
+const (
+ // IPsecModeDisabled disables IPsec altogether
+ IPsecModeDisabled IPsecMode = "Disabled"
+ // IPsecModeExternal enables IPsec on the node level, but expects the user to configure it using k8s-nmstate or
+ // other means - it is most useful for secure communication from the cluster to external endpoints
+ IPsecModeExternal IPsecMode = "External"
+ // IPsecModeFull enables IPsec on the node level (the same as IPsecModeExternal), and configures it to secure communication
+ // between pods on the cluster network.
+ IPsecModeFull IPsecMode = "Full"
+)
+
+// +kubebuilder:validation:Enum:="";"Enabled";"Disabled"
+type RouteAdvertisementsEnablement string
+
+var (
+ // RouteAdvertisementsEnabled enables route advertisements for ovn-kubernetes
+ RouteAdvertisementsEnabled RouteAdvertisementsEnablement = "Enabled"
+ // RouteAdvertisementsDisabled disables route advertisements for ovn-kubernetes
+ RouteAdvertisementsDisabled RouteAdvertisementsEnablement = "Disabled"
+)
+
+// RoutingCapabilitiesProvider is a component providing routing capabilities.
+// +kubebuilder:validation:Enum=FRR
+type RoutingCapabilitiesProvider string
+
+const (
+ // RoutingCapabilitiesProviderFRR indicates that FRR is providing advanced
+ // routing capabilities.
+ RoutingCapabilitiesProviderFRR RoutingCapabilitiesProvider = "FRR"
+)
+
+// AdditionalRoutingCapabilities describes components and relevant configuration providing
+// advanced routing capabilities.
+type AdditionalRoutingCapabilities struct {
+ // providers is a set of enabled components that provide additional routing
+ // capabilities. Entries on this list must be unique. The only valid value
+ // is currently "FRR" which provides FRR routing capabilities through the
+ // deployment of FRR.
+ // +listType=atomic
+ // +required
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=1
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ Providers []RoutingCapabilitiesProvider `json:"providers"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_olm.go b/vendor/github.com/openshift/api/operator/v1/types_olm.go
new file mode 100644
index 0000000000000..07c94ece2e215
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_olm.go
@@ -0,0 +1,61 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OLM provides information to configure an operator to manage the OLM controllers
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
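+//
+// An illustrative manifest (hypothetical field values; per the validation rule
+// below, the resource is a singleton and must be named "cluster"):
+//
+//	apiVersion: operator.openshift.io/v1
+//	kind: OLM
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  managementState: Managed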
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=olms,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=false
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01
+// +openshift:enable:FeatureGate=NewOLM
+// +openshift:capability=OperatorLifecycleManagerV1
+// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'"
+type OLM struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ Spec OLMSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status OLMStatus `json:"status"`
+}
+
+type OLMSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OLMStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OLMList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OLMList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // items contains the items
+ Items []OLM `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
new file mode 100644
index 0000000000000..a96e033cb7beb
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
@@ -0,0 +1,59 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=openshiftapiservers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_30,operatorName=openshift-apiserver,operatorOrdering=01
+
+// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OpenShiftAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the OpenShift API Server.
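+ // The spec inlines the generic OperatorSpec, so a minimal, illustrative
+ // (hypothetical) configuration could look like:
+ //
+ //	spec:
+ //	  managementState: Managed
+ //	  logLevel: Normal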
+ // +required + Spec OpenShiftAPIServerSpec `json:"spec"` + + // status defines the observed status of the OpenShift API Server. + // +optional + Status OpenShiftAPIServerStatus `json:"status"` +} + +type OpenShiftAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type OpenShiftAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenShiftAPIServerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftAPIServerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []OpenShiftAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go new file mode 100644 index 0000000000000..8a553a05790bf --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go @@ -0,0 +1,56 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=openshiftcontrollermanagers,scope=Cluster,categories=coreoperators +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=openshift-controller-manager,operatorOrdering=02 + +// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftControllerManager struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec OpenShiftControllerManagerSpec `json:"spec"` + // +optional + Status OpenShiftControllerManagerStatus `json:"status"` +} + +type OpenShiftControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type OpenShiftControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenShiftControllerManagerList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OpenShiftControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // items contains the items
+ Items []OpenShiftControllerManager `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
new file mode 100644
index 0000000000000..cfb04e8d94fea
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
@@ -0,0 +1,59 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubeschedulers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=kube-scheduler,operatorOrdering=01
+
+// KubeScheduler provides information to configure an operator to manage the scheduler.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeScheduler struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Kubernetes Scheduler.
+ // +required
+ Spec KubeSchedulerSpec `json:"spec"`
+
+ // status is the most recently observed status of the Kubernetes Scheduler.
+ // +optional
+ Status KubeSchedulerStatus `json:"status"`
+}
+
+type KubeSchedulerSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeSchedulerStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeSchedulerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeSchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // items contains the items
+ Items []KubeScheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
new file mode 100644
index 0000000000000..48534d4c63bb3
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
@@ -0,0 +1,58 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=servicecas,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=service-ca,operatorOrdering=02
+
+// ServiceCA provides information to configure an operator to manage the service cert controllers
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCA struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds user settable values for configuration
+ // +required
+ Spec ServiceCASpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ServiceCAStatus `json:"status"`
+}
+
+type ServiceCASpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type ServiceCAStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCAList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCAList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // items contains the items
+ Items []ServiceCA `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go
new file mode 100644
index 0000000000000..e058c065a67d4
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go
@@ -0,0 +1,53 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server
+// DEPRECATED: will be removed in 4.6
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCatalogAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + Spec ServiceCatalogAPIServerSpec `json:"spec"` + // +optional + Status ServiceCatalogAPIServerStatus `json:"status"` +} + +type ServiceCatalogAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServerList is a collection of items +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCatalogAPIServerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []ServiceCatalogAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go new file mode 100644 index 0000000000000..4fe2aa46a3211 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -0,0 +1,53 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCatalogControllerManager struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +required + Spec ServiceCatalogControllerManagerSpec `json:"spec"` + // +optional + Status ServiceCatalogControllerManagerStatus `json:"status"` +} + +type ServiceCatalogControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManagerList is a collection of items +// DEPRECATED: will be removed in 4.6 +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ServiceCatalogControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []ServiceCatalogControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go new file mode 100644 index 0000000000000..69691a83adeb5 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -0,0 +1,79 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=storages,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/670 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=storage,operatorOrdering=01 + +// Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Storage struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +required + Spec StorageSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status StorageStatus `json:"status"` +} + +// StorageDriverType indicates whether CSI migration should be enabled for drivers where it is optional. +// +kubebuilder:validation:Enum="";LegacyDeprecatedInTreeDriver;CSIWithMigrationDriver +type StorageDriverType string + +const ( + LegacyDeprecatedInTreeDriver StorageDriverType = "LegacyDeprecatedInTreeDriver" + CSIWithMigrationDriver StorageDriverType = "CSIWithMigrationDriver" +) + +// StorageSpec is the specification of the desired behavior of the cluster storage operator. +type StorageSpec struct { + OperatorSpec `json:",inline"` + + // vsphereStorageDriver indicates the storage driver to use on VSphere clusters. + // Once this field is set to CSIWithMigrationDriver, it can not be changed. + // If this is empty, the platform will choose a good default, + // which may change over time without notice. + // The current default is CSIWithMigrationDriver and may not be changed. + // DEPRECATED: This field will be removed in a future release. + // +kubebuilder:validation:XValidation:rule="self != \"LegacyDeprecatedInTreeDriver\"",message="VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver" + // +optional + VSphereStorageDriver StorageDriverType `json:"vsphereStorageDriver"` +} + +// StorageStatus defines the observed status of the cluster storage operator. +type StorageStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageList contains a list of Storages. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
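+//
+// Each item is a Storage object; an illustrative (hypothetical) example, using
+// the canonical name "cluster" and pinning the vSphere driver choice:
+//
+//	apiVersion: operator.openshift.io/v1
+//	kind: Storage
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  managementState: Managed
+//	  vsphereStorageDriver: CSIWithMigrationDriver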
+// +openshift:compatibility-gen:level=1 +type StorageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Storage `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..3b984f2a618ba --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -0,0 +1,5330 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + authorizationv1 "k8s.io/api/authorization/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCSIDriverConfigSpec) DeepCopyInto(out *AWSCSIDriverConfigSpec) { + *out = *in + if in.EFSVolumeMetrics != nil { + in, out := &in.EFSVolumeMetrics, &out.EFSVolumeMetrics + *out = new(AWSEFSVolumeMetrics) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCSIDriverConfigSpec. +func (in *AWSCSIDriverConfigSpec) DeepCopy() *AWSCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AWSCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { + *out = *in + out.ConnectionIdleTimeout = in.ConnectionIdleTimeout + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = new(AWSSubnets) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClassicLoadBalancerParameters. +func (in *AWSClassicLoadBalancerParameters) DeepCopy() *AWSClassicLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSClassicLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEFSVolumeMetrics) DeepCopyInto(out *AWSEFSVolumeMetrics) { + *out = *in + if in.RecursiveWalk != nil { + in, out := &in.RecursiveWalk, &out.RecursiveWalk + *out = new(AWSEFSVolumeMetricsRecursiveWalkConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEFSVolumeMetrics. +func (in *AWSEFSVolumeMetrics) DeepCopy() *AWSEFSVolumeMetrics { + if in == nil { + return nil + } + out := new(AWSEFSVolumeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEFSVolumeMetricsRecursiveWalkConfig) DeepCopyInto(out *AWSEFSVolumeMetricsRecursiveWalkConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEFSVolumeMetricsRecursiveWalkConfig. 
+func (in *AWSEFSVolumeMetricsRecursiveWalkConfig) DeepCopy() *AWSEFSVolumeMetricsRecursiveWalkConfig { + if in == nil { + return nil + } + out := new(AWSEFSVolumeMetricsRecursiveWalkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSLoadBalancerParameters) DeepCopyInto(out *AWSLoadBalancerParameters) { + *out = *in + if in.ClassicLoadBalancerParameters != nil { + in, out := &in.ClassicLoadBalancerParameters, &out.ClassicLoadBalancerParameters + *out = new(AWSClassicLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkLoadBalancerParameters != nil { + in, out := &in.NetworkLoadBalancerParameters, &out.NetworkLoadBalancerParameters + *out = new(AWSNetworkLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerParameters. +func (in *AWSLoadBalancerParameters) DeepCopy() *AWSLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSNetworkLoadBalancerParameters) DeepCopyInto(out *AWSNetworkLoadBalancerParameters) { + *out = *in + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = new(AWSSubnets) + (*in).DeepCopyInto(*out) + } + if in.EIPAllocations != nil { + in, out := &in.EIPAllocations, &out.EIPAllocations + *out = make([]EIPAllocation, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkLoadBalancerParameters. +func (in *AWSNetworkLoadBalancerParameters) DeepCopy() *AWSNetworkLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSNetworkLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSubnets) DeepCopyInto(out *AWSSubnets) { + *out = *in + if in.IDs != nil { + in, out := &in.IDs, &out.IDs + *out = make([]AWSSubnetID, len(*in)) + copy(*out, *in) + } + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]AWSSubnetName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSubnets. +func (in *AWSSubnets) DeepCopy() *AWSSubnets { + if in == nil { + return nil + } + out := new(AWSSubnets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogging) DeepCopyInto(out *AccessLogging) { + *out = *in + in.Destination.DeepCopyInto(&out.Destination) + in.HTTPCaptureHeaders.DeepCopyInto(&out.HTTPCaptureHeaders) + if in.HTTPCaptureCookies != nil { + in, out := &in.HTTPCaptureCookies, &out.HTTPCaptureCookies + *out = make([]IngressControllerCaptureHTTPCookie, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogging. 
+func (in *AccessLogging) DeepCopy() *AccessLogging { + if in == nil { + return nil + } + out := new(AccessLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddPage) DeepCopyInto(out *AddPage) { + *out = *in + if in.DisabledActions != nil { + in, out := &in.DisabledActions, &out.DisabledActions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddPage. +func (in *AddPage) DeepCopy() *AddPage { + if in == nil { + return nil + } + out := new(AddPage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { + *out = *in + if in.SimpleMacvlanConfig != nil { + in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig + *out = new(SimpleMacvlanConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. +func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { + if in == nil { + return nil + } + out := new(AdditionalNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalRoutingCapabilities) DeepCopyInto(out *AdditionalRoutingCapabilities) { + *out = *in + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]RoutingCapabilitiesProvider, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalRoutingCapabilities. +func (in *AdditionalRoutingCapabilities) DeepCopy() *AdditionalRoutingCapabilities { + if in == nil { + return nil + } + out := new(AdditionalRoutingCapabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.OAuthAPIServer = in.OAuthAPIServer + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureCSIDriverConfigSpec) DeepCopyInto(out *AzureCSIDriverConfigSpec) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(AzureDiskEncryptionSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCSIDriverConfigSpec. +func (in *AzureCSIDriverConfigSpec) DeepCopy() *AzureCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AzureCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskEncryptionSet) DeepCopyInto(out *AzureDiskEncryptionSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskEncryptionSet. +func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet { + if in == nil { + return nil + } + out := new(AzureDiskEncryptionSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudCSIDriverConfigSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSphereCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverConfigSpec. +func (in *CSIDriverConfigSpec) DeepCopy() *CSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(CSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController. +func (in *CSISnapshotController) DeepCopy() *CSISnapshotController { + if in == nil { + return nil + } + out := new(CSISnapshotController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSISnapshotController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList. +func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList { + if in == nil { + return nil + } + out := new(CSISnapshotControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec. 
+func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec { + if in == nil { + return nil + } + out := new(CSISnapshotControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus. +func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus { + if in == nil { + return nil + } + out := new(CSISnapshotControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capability) DeepCopyInto(out *Capability) { + *out = *in + out.Visibility = in.Visibility + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capability. +func (in *Capability) DeepCopy() *Capability { + if in == nil { + return nil + } + out := new(Capability) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapabilityVisibility) DeepCopyInto(out *CapabilityVisibility) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapabilityVisibility. +func (in *CapabilityVisibility) DeepCopy() *CapabilityVisibility { + if in == nil { + return nil + } + out := new(CapabilityVisibility) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { + *out = *in + out.ClientCA = in.ClientCA + if in.AllowedSubjectPatterns != nil { + in, out := &in.AllowedSubjectPatterns, &out.AllowedSubjectPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. +func (in *ClientTLS) DeepCopy() *ClientTLS { + if in == nil { + return nil + } + out := new(ClientTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredential) DeepCopyInto(out *CloudCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredential. +func (in *CloudCredential) DeepCopy() *CloudCredential { + if in == nil { + return nil + } + out := new(CloudCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudCredentialList) DeepCopyInto(out *CloudCredentialList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudCredential, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialList. +func (in *CloudCredentialList) DeepCopy() *CloudCredentialList { + if in == nil { + return nil + } + out := new(CloudCredentialList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudCredentialList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialSpec) DeepCopyInto(out *CloudCredentialSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialSpec. +func (in *CloudCredentialSpec) DeepCopy() *CloudCredentialSpec { + if in == nil { + return nil + } + out := new(CloudCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialStatus) DeepCopyInto(out *CloudCredentialStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialStatus. +func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus { + if in == nil { + return nil + } + out := new(CloudCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriver. +func (in *ClusterCSIDriver) DeepCopy() *ClusterCSIDriver { + if in == nil { + return nil + } + out := new(ClusterCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverList) DeepCopyInto(out *ClusterCSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverList. 
+func (in *ClusterCSIDriverList) DeepCopy() *ClusterCSIDriverList { + if in == nil { + return nil + } + out := new(ClusterCSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverSpec) DeepCopyInto(out *ClusterCSIDriverSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + in.DriverConfig.DeepCopyInto(&out.DriverConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverSpec. +func (in *ClusterCSIDriverSpec) DeepCopy() *ClusterCSIDriverSpec { + if in == nil { + return nil + } + out := new(ClusterCSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverStatus) DeepCopyInto(out *ClusterCSIDriverStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverStatus. +func (in *ClusterCSIDriverStatus) DeepCopy() *ClusterCSIDriverStatus { + if in == nil { + return nil + } + out := new(ClusterCSIDriverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. 
+func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleConfigRoute) DeepCopyInto(out *ConsoleConfigRoute) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleConfigRoute. +func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute { + if in == nil { + return nil + } + out := new(ConsoleConfigRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + out.CustomLogoFile = in.CustomLogoFile + in.DeveloperCatalog.DeepCopyInto(&out.DeveloperCatalog) + in.ProjectAccess.DeepCopyInto(&out.ProjectAccess) + in.QuickStarts.DeepCopyInto(&out.QuickStarts) + in.AddPage.DeepCopyInto(&out.AddPage) + if in.Perspectives != nil { + in, out := &in.Perspectives, &out.Perspectives + *out = make([]Perspective, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization. +func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization { + if in == nil { + return nil + } + out := new(ConsoleCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) { + *out = *in + if in.Statuspage != nil { + in, out := &in.Statuspage, &out.Statuspage + *out = new(StatuspageProvider) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders. +func (in *ConsoleProviders) DeepCopy() *ConsoleProviders { + if in == nil { + return nil + } + out := new(ConsoleProviders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + in.Customization.DeepCopyInto(&out.Customization) + in.Providers.DeepCopyInto(&out.Providers) + out.Route = in.Route + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Ingress = in.Ingress + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerLoggingDestinationParameters) DeepCopyInto(out *ContainerLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerLoggingDestinationParameters. +func (in *ContainerLoggingDestinationParameters) DeepCopy() *ContainerLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(ContainerLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSCache) DeepCopyInto(out *DNSCache) { + *out = *in + out.PositiveTTL = in.PositiveTTL + out.NegativeTTL = in.NegativeTTL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSCache. +func (in *DNSCache) DeepCopy() *DNSCache { + if in == nil { + return nil + } + out := new(DNSCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSNodePlacement) DeepCopyInto(out *DNSNodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNodePlacement. +func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement { + if in == nil { + return nil + } + out := new(DNSNodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOverTLSConfig) DeepCopyInto(out *DNSOverTLSConfig) { + *out = *in + out.CABundle = in.CABundle + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOverTLSConfig. +func (in *DNSOverTLSConfig) DeepCopy() *DNSOverTLSConfig { + if in == nil { + return nil + } + out := new(DNSOverTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpstreamResolvers.DeepCopyInto(&out.UpstreamResolvers) + in.NodePlacement.DeepCopyInto(&out.NodePlacement) + out.Cache = in.Cache + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSTransportConfig) DeepCopyInto(out *DNSTransportConfig) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(DNSOverTLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTransportConfig. +func (in *DNSTransportConfig) DeepCopy() *DNSTransportConfig { + if in == nil { + return nil + } + out := new(DNSTransportConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
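Note: map-valued fields such as NodeSelector above are copied entry by entry, because assigning a map only copies its header and leaves both structs sharing one underlying table. A minimal sketch with a hypothetical Placement type:

package main

import "fmt"

type Placement struct {
	NodeSelector map[string]string
}

// DeepCopyInto copies the map key by key, mirroring the generated
// DNSNodePlacement copier above.
func (in *Placement) DeepCopyInto(out *Placement) {
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

func main() {
	a := Placement{NodeSelector: map[string]string{"zone": "us-east"}}
	var b Placement
	a.DeepCopyInto(&b)
	b.NodeSelector["zone"] = "us-west"
	fmt.Println(a.NodeSelector["zone"]) // us-east: the maps are independent
}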
+func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { + *out = *in + if in.OpenShiftSDNConfig != nil { + in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig + *out = new(OpenShiftSDNConfig) + (*in).DeepCopyInto(*out) + } + if in.OVNKubernetesConfig != nil { + in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig + *out = new(OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition. +func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition { + if in == nil { + return nil + } + out := new(DefaultNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCategory) DeepCopyInto(out *DeveloperConsoleCatalogCategory) { + *out = *in + in.DeveloperConsoleCatalogCategoryMeta.DeepCopyInto(&out.DeveloperConsoleCatalogCategoryMeta) + if in.Subcategories != nil { + in, out := &in.Subcategories, &out.Subcategories + *out = make([]DeveloperConsoleCatalogCategoryMeta, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategory. +func (in *DeveloperConsoleCatalogCategory) DeepCopy() *DeveloperConsoleCatalogCategory { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCategory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopyInto(out *DeveloperConsoleCatalogCategoryMeta) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategoryMeta. +func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopy() *DeveloperConsoleCatalogCategoryMeta { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCategoryMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCustomization) DeepCopyInto(out *DeveloperConsoleCatalogCustomization) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]DeveloperConsoleCatalogCategory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Types.DeepCopyInto(&out.Types) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCustomization. +func (in *DeveloperConsoleCatalogCustomization) DeepCopy() *DeveloperConsoleCatalogCustomization { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeveloperConsoleCatalogTypes) DeepCopyInto(out *DeveloperConsoleCatalogTypes) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogTypes. +func (in *DeveloperConsoleCatalogTypes) DeepCopy() *DeveloperConsoleCatalogTypes { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogTypes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressIPConfig) DeepCopyInto(out *EgressIPConfig) { + *out = *in + if in.ReachabilityTotalTimeoutSeconds != nil { + in, out := &in.ReachabilityTotalTimeoutSeconds, &out.ReachabilityTotalTimeoutSeconds + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressIPConfig. +func (in *EgressIPConfig) DeepCopy() *EgressIPConfig { + if in == nil { + return nil + } + out := new(EgressIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) { + *out = *in + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerStrategy) + (*in).DeepCopyInto(*out) + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(HostNetworkStrategy) + **out = **in + } + if in.Private != nil { + in, out := &in.Private, &out.Private + *out = new(PrivateStrategy) + **out = **in + } + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy. +func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy { + if in == nil { + return nil + } + out := new(EndpointPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Etcd) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
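Note: DeveloperConsoleCatalogTypes above shows the rarer pointer-to-slice case (*[]string), where a nil pointer, a pointer to a nil slice, and a pointer to a populated slice are three distinct API states the copier must preserve. A sketch of the same two-level handling with a hypothetical Types struct:

package main

import "fmt"

type Types struct {
	Enabled *[]string
}

// DeepCopyInto handles each indirection separately, as the generated copier
// above does: first allocate a fresh outer pointer, then, only if the
// pointed-to slice is non-nil, allocate and fill a fresh backing array.
func (in *Types) DeepCopyInto(out *Types) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new([]string)
		if **in != nil {
			in, out := *in, *out
			*out = make([]string, len(*in))
			copy(*out, *in)
		}
	}
}

func main() {
	src := Types{Enabled: &[]string{"builds"}}
	var dst Types
	src.DeepCopyInto(&dst)
	(*dst.Enabled)[0] = "pipelines"
	fmt.Println((*src.Enabled)[0]) // builds: neither pointer nor array is shared
}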
+func (in *EtcdList) DeepCopyInto(out *EtcdList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Etcd, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList. +func (in *EtcdList) DeepCopy() *EtcdList { + if in == nil { + return nil + } + out := new(EtcdList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EtcdList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. +func (in *EtcdSpec) DeepCopy() *EtcdSpec { + if in == nil { + return nil + } + out := new(EtcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus. +func (in *EtcdStatus) DeepCopy() *EtcdStatus { + if in == nil { + return nil + } + out := new(EtcdStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportNetworkFlows) DeepCopyInto(out *ExportNetworkFlows) { + *out = *in + if in.NetFlow != nil { + in, out := &in.NetFlow, &out.NetFlow + *out = new(NetFlowConfig) + (*in).DeepCopyInto(*out) + } + if in.SFlow != nil { + in, out := &in.SFlow, &out.SFlow + *out = new(SFlowConfig) + (*in).DeepCopyInto(*out) + } + if in.IPFIX != nil { + in, out := &in.IPFIX, &out.IPFIX + *out = new(IPFIXConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportNetworkFlows. +func (in *ExportNetworkFlows) DeepCopy() *ExportNetworkFlows { + if in == nil { + return nil + } + out := new(ExportNetworkFlows) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeaturesMigration) DeepCopyInto(out *FeaturesMigration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesMigration. +func (in *FeaturesMigration) DeepCopy() *FeaturesMigration { + if in == nil { + return nil + } + out := new(FeaturesMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { + *out = *in + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.TransportConfig.DeepCopyInto(&out.TransportConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin. +func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { + if in == nil { + return nil + } + out := new(ForwardPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPCSIDriverConfigSpec) DeepCopyInto(out *GCPCSIDriverConfigSpec) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPCSIDriverConfigSpec. +func (in *GCPCSIDriverConfigSpec) DeepCopy() *GCPCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(GCPCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPLoadBalancerParameters. +func (in *GCPLoadBalancerParameters) DeepCopy() *GCPLoadBalancerParameters { + if in == nil { + return nil + } + out := new(GCPLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayConfig) DeepCopyInto(out *GatewayConfig) { + *out = *in + out.IPv4 = in.IPv4 + out.IPv6 = in.IPv6 + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayConfig. +func (in *GatewayConfig) DeepCopy() *GatewayConfig { + if in == nil { + return nil + } + out := new(GatewayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherStatus) DeepCopyInto(out *GatherStatus) { + *out = *in + in.LastGatherTime.DeepCopyInto(&out.LastGatherTime) + out.LastGatherDuration = in.LastGatherDuration + if in.Gatherers != nil { + in, out := &in.Gatherers, &out.Gatherers + *out = make([]GathererStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherStatus. +func (in *GatherStatus) DeepCopy() *GatherStatus { + if in == nil { + return nil + } + out := new(GatherStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GathererStatus) DeepCopyInto(out *GathererStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.LastGatherDuration = in.LastGatherDuration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererStatus. +func (in *GathererStatus) DeepCopy() *GathererStatus { + if in == nil { + return nil + } + out := new(GathererStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus. +func (in *GenerationStatus) DeepCopy() *GenerationStatus { + if in == nil { + return nil + } + out := new(GenerationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCompressionPolicy) DeepCopyInto(out *HTTPCompressionPolicy) { + *out = *in + if in.MimeTypes != nil { + in, out := &in.MimeTypes, &out.MimeTypes + *out = make([]CompressionMIMEType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCompressionPolicy. +func (in *HTTPCompressionPolicy) DeepCopy() *HTTPCompressionPolicy { + if in == nil { + return nil + } + out := new(HTTPCompressionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck. +func (in *HealthCheck) DeepCopy() *HealthCheck { + if in == nil { + return nil + } + out := new(HealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy. +func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy { + if in == nil { + return nil + } + out := new(HostNetworkStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) { + *out = *in + if in.HybridClusterNetwork != nil { + in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.HybridOverlayVXLANPort != nil { + in, out := &in.HybridOverlayVXLANPort, &out.HybridOverlayVXLANPort + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig.
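Note: pointer-to-struct fields above are copied in one of two ways, chosen per pointee type: **out = **in when the pointee is a flat value struct (GCPKMSKeyReference above), or (*in).DeepCopyInto(*out) when the pointee owns references of its own (NetFlowConfig, SFlowConfig, IPFIXConfig). A sketch contrasting the two, with hypothetical Flat and Nested types:

package main

import "fmt"

// Flat has only value fields, so one dereferencing assignment copies it
// completely; this is the **out = **in case.
type Flat struct{ Name string }

// Nested owns a slice, so it needs its own DeepCopyInto; this is the
// (*in).DeepCopyInto(*out) case.
type Nested struct{ Collectors []string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	if in.Collectors != nil {
		in, out := &in.Collectors, &out.Collectors
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

type Config struct {
	Key  *Flat
	Flow *Nested
}

func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(Flat)
		**out = **in // Flat has no reference fields: plain copy is already deep
	}
	if in.Flow != nil {
		in, out := &in.Flow, &out.Flow
		*out = new(Nested)
		(*in).DeepCopyInto(*out) // Nested must deep-copy its slice
	}
}

func main() {
	a := Config{Key: &Flat{"k"}, Flow: &Nested{Collectors: []string{"10.0.0.1:2056"}}}
	var b Config
	a.DeepCopyInto(&b)
	b.Flow.Collectors[0] = "changed"
	fmt.Println(a.Flow.Collectors[0]) // 10.0.0.1:2056
}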
+func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { + if in == nil { + return nil + } + out := new(HybridOverlayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudCSIDriverConfigSpec) DeepCopyInto(out *IBMCloudCSIDriverConfigSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudCSIDriverConfigSpec. +func (in *IBMCloudCSIDriverConfigSpec) DeepCopy() *IBMCloudCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(IBMCloudCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMLoadBalancerParameters) DeepCopyInto(out *IBMLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMLoadBalancerParameters. +func (in *IBMLoadBalancerParameters) DeepCopy() *IBMLoadBalancerParameters { + if in == nil { + return nil + } + out := new(IBMLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.StaticIPAMConfig != nil { + in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig + *out = new(StaticIPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. +func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPFIXConfig) DeepCopyInto(out *IPFIXConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFIXConfig. +func (in *IPFIXConfig) DeepCopy() *IPFIXConfig { + if in == nil { + return nil + } + out := new(IPFIXConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecConfig. +func (in *IPsecConfig) DeepCopy() *IPsecConfig { + if in == nil { + return nil + } + out := new(IPsecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4GatewayConfig) DeepCopyInto(out *IPv4GatewayConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4GatewayConfig. +func (in *IPv4GatewayConfig) DeepCopy() *IPv4GatewayConfig { + if in == nil { + return nil + } + out := new(IPv4GatewayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv4OVNKubernetesConfig) DeepCopyInto(out *IPv4OVNKubernetesConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4OVNKubernetesConfig. +func (in *IPv4OVNKubernetesConfig) DeepCopy() *IPv4OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(IPv4OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6GatewayConfig) DeepCopyInto(out *IPv6GatewayConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6GatewayConfig. +func (in *IPv6GatewayConfig) DeepCopy() *IPv6GatewayConfig { + if in == nil { + return nil + } + out := new(IPv6GatewayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6OVNKubernetesConfig) DeepCopyInto(out *IPv6OVNKubernetesConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6OVNKubernetesConfig. +func (in *IPv6OVNKubernetesConfig) DeepCopy() *IPv6OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(IPv6OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. +func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPCookie) DeepCopyInto(out *IngressControllerCaptureHTTPCookie) { + *out = *in + out.IngressControllerCaptureHTTPCookieUnion = in.IngressControllerCaptureHTTPCookieUnion + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookie. +func (in *IngressControllerCaptureHTTPCookie) DeepCopy() *IngressControllerCaptureHTTPCookie { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPCookie) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopyInto(out *IngressControllerCaptureHTTPCookieUnion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookieUnion. +func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopy() *IngressControllerCaptureHTTPCookieUnion { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPCookieUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPHeader) DeepCopyInto(out *IngressControllerCaptureHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeader. +func (in *IngressControllerCaptureHTTPHeader) DeepCopy() *IngressControllerCaptureHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPHeaders) DeepCopyInto(out *IngressControllerCaptureHTTPHeaders) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeaders. +func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCaptureHTTPHeaders { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeader) DeepCopyInto(out *IngressControllerHTTPHeader) { + *out = *in + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeader. +func (in *IngressControllerHTTPHeader) DeepCopy() *IngressControllerHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaderActionUnion) DeepCopyInto(out *IngressControllerHTTPHeaderActionUnion) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = new(IngressControllerSetHTTPHeader) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActionUnion. +func (in *IngressControllerHTTPHeaderActionUnion) DeepCopy() *IngressControllerHTTPHeaderActionUnion { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaderActionUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerHTTPHeaderActions) DeepCopyInto(out *IngressControllerHTTPHeaderActions) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]IngressControllerHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]IngressControllerHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActions. +func (in *IngressControllerHTTPHeaderActions) DeepCopy() *IngressControllerHTTPHeaderActions { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaderActions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) { + *out = *in + out.UniqueId = in.UniqueId + if in.HeaderNameCaseAdjustments != nil { + in, out := &in.HeaderNameCaseAdjustments, &out.HeaderNameCaseAdjustments + *out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in)) + copy(*out, *in) + } + in.Actions.DeepCopyInto(&out.Actions) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaders. +func (in *IngressControllerHTTPHeaders) DeepCopy() *IngressControllerHTTPHeaders { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopyInto(out *IngressControllerHTTPUniqueIdHeaderPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPUniqueIdHeaderPolicy. +func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopy() *IngressControllerHTTPUniqueIdHeaderPolicy { + if in == nil { + return nil + } + out := new(IngressControllerHTTPUniqueIdHeaderPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngressController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList. +func (in *IngressControllerList) DeepCopy() *IngressControllerList { + if in == nil { + return nil + } + out := new(IngressControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerLogging) DeepCopyInto(out *IngressControllerLogging) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = new(AccessLogging) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerLogging. +func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging { + if in == nil { + return nil + } + out := new(IngressControllerLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerSetHTTPHeader) DeepCopyInto(out *IngressControllerSetHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSetHTTPHeader. +func (in *IngressControllerSetHTTPHeader) DeepCopy() *IngressControllerSetHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerSetHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { + *out = *in + out.HttpErrorCodePages = in.HttpErrorCodePages + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.DefaultCertificate != nil { + in, out := &in.DefaultCertificate, &out.DefaultCertificate + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodePlacement != nil { + in, out := &in.NodePlacement, &out.NodePlacement + *out = new(NodePlacement) + (*in).DeepCopyInto(*out) + } + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(configv1.TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + in.ClientTLS.DeepCopyInto(&out.ClientTLS) + if in.RouteAdmission != nil { + in, out := &in.RouteAdmission, &out.RouteAdmission + *out = new(RouteAdmissionPolicy) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(IngressControllerLogging) + (*in).DeepCopyInto(*out) + } + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = new(IngressControllerHTTPHeaders) + (*in).DeepCopyInto(*out) + } + in.TuningOptions.DeepCopyInto(&out.TuningOptions) + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.HTTPCompression.DeepCopyInto(&out.HTTPCompression) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec. +func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec { + if in == nil { + return nil + } + out := new(IngressControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { + *out = *in + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSProfile != nil { + in, out := &in.TLSProfile, &out.TLSProfile + *out = new(configv1.TLSProfileSpec) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus. +func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus { + if in == nil { + return nil + } + out := new(IngressControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTuningOptions) { + *out = *in + if in.ClientTimeout != nil { + in, out := &in.ClientTimeout, &out.ClientTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ClientFinTimeout != nil { + in, out := &in.ClientFinTimeout, &out.ClientFinTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ServerTimeout != nil { + in, out := &in.ServerTimeout, &out.ServerTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ServerFinTimeout != nil { + in, out := &in.ServerFinTimeout, &out.ServerFinTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.TunnelTimeout != nil { + in, out := &in.TunnelTimeout, &out.TunnelTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.ConnectTimeout != nil { + in, out := &in.ConnectTimeout, &out.ConnectTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.TLSInspectDelay != nil { + in, out := &in.TLSInspectDelay, &out.TLSInspectDelay + *out = new(metav1.Duration) + **out = **in + } + if in.HealthCheckInterval != nil { + in, out := &in.HealthCheckInterval, &out.HealthCheckInterval + *out = new(metav1.Duration) + **out = **in + } + out.ReloadInterval = in.ReloadInterval + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerTuningOptions. +func (in *IngressControllerTuningOptions) DeepCopy() *IngressControllerTuningOptions { + if in == nil { + return nil + } + out := new(IngressControllerTuningOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperator) DeepCopyInto(out *InsightsOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperator. 
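Note: the many *metav1.Duration fields in IngressControllerTuningOptions above are pointers so that an unset timeout remains distinguishable from an explicit zero; since metav1.Duration wraps a plain time.Duration with no internal references, **out = **in is a complete copy. A sketch with a local Duration stand-in:

package main

import (
	"fmt"
	"time"
)

// Duration stands in for metav1.Duration: a small value type with no
// internal pointers, which is why the generated copier can use **out = **in.
type Duration struct{ time.Duration }

type TuningOptions struct {
	ClientTimeout *Duration // pointer so "unset" differs from 0s
}

func (in *TuningOptions) DeepCopyInto(out *TuningOptions) {
	*out = *in
	if in.ClientTimeout != nil {
		in, out := &in.ClientTimeout, &out.ClientTimeout
		*out = new(Duration)
		**out = **in // safe: Duration contains no reference types
	}
}

func main() {
	t := TuningOptions{ClientTimeout: &Duration{30 * time.Second}}
	var c TuningOptions
	t.DeepCopyInto(&c)
	c.ClientTimeout.Duration = time.Minute
	fmt.Println(t.ClientTimeout.Duration) // 30s: the pointers are not shared
}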
+func (in *InsightsOperator) DeepCopy() *InsightsOperator { + if in == nil { + return nil + } + out := new(InsightsOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperatorList) DeepCopyInto(out *InsightsOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorList. +func (in *InsightsOperatorList) DeepCopy() *InsightsOperatorList { + if in == nil { + return nil + } + out := new(InsightsOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperatorSpec) DeepCopyInto(out *InsightsOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorSpec. +func (in *InsightsOperatorSpec) DeepCopy() *InsightsOperatorSpec { + if in == nil { + return nil + } + out := new(InsightsOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsOperatorStatus) DeepCopyInto(out *InsightsOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + in.GatherStatus.DeepCopyInto(&out.GatherStatus) + in.InsightsReport.DeepCopyInto(&out.InsightsReport) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorStatus. +func (in *InsightsOperatorStatus) DeepCopy() *InsightsOperatorStatus { + if in == nil { + return nil + } + out := new(InsightsOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsReport) DeepCopyInto(out *InsightsReport) { + *out = *in + in.DownloadedAt.DeepCopyInto(&out.DownloadedAt) + if in.HealthChecks != nil { + in, out := &in.HealthChecks, &out.HealthChecks + *out = make([]HealthCheck, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsReport. +func (in *InsightsReport) DeepCopy() *InsightsReport { + if in == nil { + return nil + } + out := new(InsightsReport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer. +func (in *KubeAPIServer) DeepCopy() *KubeAPIServer { + if in == nil { + return nil + } + out := new(KubeAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList. +func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList { + if in == nil { + return nil + } + out := new(KubeAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec. +func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec { + if in == nil { + return nil + } + out := new(KubeAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + if in.ServiceAccountIssuers != nil { + in, out := &in.ServiceAccountIssuers, &out.ServiceAccountIssuers + *out = make([]ServiceAccountIssuerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus. +func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus { + if in == nil { + return nil + } + out := new(KubeAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager. 
+func (in *KubeControllerManager) DeepCopy() *KubeControllerManager { + if in == nil { + return nil + } + out := new(KubeControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList. +func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList { + if in == nil { + return nil + } + out := new(KubeControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec. +func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec { + if in == nil { + return nil + } + out := new(KubeControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus. +func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus { + if in == nil { + return nil + } + out := new(KubeControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler. +func (in *KubeScheduler) DeepCopy() *KubeScheduler { + if in == nil { + return nil + } + out := new(KubeScheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KubeScheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeScheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList. +func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList { + if in == nil { + return nil + } + out := new(KubeSchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec. +func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec { + if in == nil { + return nil + } + out := new(KubeSchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus. +func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus { + if in == nil { + return nil + } + out := new(KubeSchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator. +func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigrator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeStorageVersionMigrator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList. +func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec. +func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus. +func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) { + *out = *in + if in.AllowedSourceRanges != nil { + in, out := &in.AllowedSourceRanges, &out.AllowedSourceRanges + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + if in.ProviderParameters != nil { + in, out := &in.ProviderParameters, &out.ProviderParameters + *out = new(ProviderLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy. +func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy { + if in == nil { + return nil + } + out := new(LoadBalancerStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingDestination) DeepCopyInto(out *LoggingDestination) { + *out = *in + if in.Syslog != nil { + in, out := &in.Syslog, &out.Syslog + *out = new(SyslogLoggingDestinationParameters) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ContainerLoggingDestinationParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestination. +func (in *LoggingDestination) DeepCopy() *LoggingDestination { + if in == nil { + return nil + } + out := new(LoggingDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. +func (in *MTUMigration) DeepCopy() *MTUMigration { + if in == nil { + return nil + } + out := new(MTUMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(uint32) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. +func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { + if in == nil { + return nil + } + out := new(MTUMigrationValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfiguration) DeepCopyInto(out *MachineConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfiguration. +func (in *MachineConfiguration) DeepCopy() *MachineConfiguration { + if in == nil { + return nil + } + out := new(MachineConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigurationList) DeepCopyInto(out *MachineConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationList. 
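MTUMigrationValues above copies its *uint32 fields by allocating a fresh pointee and assigning through it (**out = **in), so clone and original never share memory. A runnable sketch of that idiom with a hypothetical Tunable type:

package main

import "fmt"

// Tunable is a hypothetical type with an optional numeric field.
type Tunable struct {
	MTU *uint32
}

func (in *Tunable) DeepCopyInto(out *Tunable) {
	*out = *in // copies the pointer itself...
	if in.MTU != nil {
		in, out := &in.MTU, &out.MTU // shadowing, as the generated code does
		*out = new(uint32)           // ...then replaces it with a fresh pointee
		**out = **in
	}
}

func main() {
	v := uint32(1400)
	a := Tunable{MTU: &v}
	var b Tunable
	a.DeepCopyInto(&b)
	*b.MTU = 9000
	fmt.Println(*a.MTU, *b.MTU) // prints: 1400 9000 — no shared memory
}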
+func (in *MachineConfigurationList) DeepCopy() *MachineConfigurationList { + if in == nil { + return nil + } + out := new(MachineConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigurationSpec) DeepCopyInto(out *MachineConfigurationSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + in.ManagedBootImages.DeepCopyInto(&out.ManagedBootImages) + in.NodeDisruptionPolicy.DeepCopyInto(&out.NodeDisruptionPolicy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationSpec. +func (in *MachineConfigurationSpec) DeepCopy() *MachineConfigurationSpec { + if in == nil { + return nil + } + out := new(MachineConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigurationStatus) DeepCopyInto(out *MachineConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodeDisruptionPolicyStatus.DeepCopyInto(&out.NodeDisruptionPolicyStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationStatus. +func (in *MachineConfigurationStatus) DeepCopy() *MachineConfigurationStatus { + if in == nil { + return nil + } + out := new(MachineConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineManager) DeepCopyInto(out *MachineManager) { + *out = *in + in.Selection.DeepCopyInto(&out.Selection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineManager. +func (in *MachineManager) DeepCopy() *MachineManager { + if in == nil { + return nil + } + out := new(MachineManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineManagerSelector) DeepCopyInto(out *MachineManagerSelector) { + *out = *in + if in.Partial != nil { + in, out := &in.Partial, &out.Partial + *out = new(PartialSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineManagerSelector. +func (in *MachineManagerSelector) DeepCopy() *MachineManagerSelector { + if in == nil { + return nil + } + out := new(MachineManagerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
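MachineConfigurationSpec above delegates to StaticPodOperatorSpec.DeepCopyInto rather than re-implementing it: shallow-copy first, then re-deep-copy only the fields that need it. The pattern in miniature (Inner and Outer are hypothetical):

package main

import "fmt"

type Inner struct{ Args []string }

func (in *Inner) DeepCopyInto(out *Inner) {
	*out = *in
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		copy(out.Args, in.Args)
	}
}

type Outer struct {
	Name  string
	Inner Inner
}

func (in *Outer) DeepCopyInto(out *Outer) {
	*out = *in                        // shallow first: copies Name and the Inner header
	in.Inner.DeepCopyInto(&out.Inner) // then re-deep-copies only what needs it
}

func main() {
	a := Outer{Name: "a", Inner: Inner{Args: []string{"-v=2"}}}
	var b Outer
	a.DeepCopyInto(&b)
	b.Inner.Args[0] = "-v=5"
	fmt.Println(a.Inner.Args[0], b.Inner.Args[0]) // prints: -v=2 -v=5
}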
+func (in *ManagedBootImages) DeepCopyInto(out *ManagedBootImages) { + *out = *in + if in.MachineManagers != nil { + in, out := &in.MachineManagers, &out.MachineManagers + *out = make([]MachineManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedBootImages. +func (in *ManagedBootImages) DeepCopy() *ManagedBootImages { + if in == nil { + return nil + } + out := new(ManagedBootImages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource. +func (in *MyOperatorResource) DeepCopy() *MyOperatorResource { + if in == nil { + return nil + } + out := new(MyOperatorResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec. +func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec { + if in == nil { + return nil + } + out := new(MyOperatorResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus. +func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus { + if in == nil { + return nil + } + out := new(MyOperatorResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetFlowConfig) DeepCopyInto(out *NetFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetFlowConfig. +func (in *NetFlowConfig) DeepCopy() *NetFlowConfig { + if in == nil { + return nil + } + out := new(NetFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. 
+func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(MTUMigration) + (*in).DeepCopyInto(*out) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(FeaturesMigration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
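DeepCopyObject methods like Network's exist so any API object can be cloned through the runtime.Object interface without knowing its concrete type. A self-contained sketch that declares a minimal local stand-in for that interface instead of importing k8s.io/apimachinery:

package main

import "fmt"

// Object is a minimal stand-in for runtime.Object, declared locally
// so the sketch is self-contained.
type Object interface {
	DeepCopyObject() Object
}

// Gadget is a hypothetical API type.
type Gadget struct{ Name string }

func (in *Gadget) DeepCopyInto(out *Gadget) { *out = *in }

func (in *Gadget) DeepCopy() *Gadget {
	if in == nil {
		return nil
	}
	out := new(Gadget)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject has the same shape as the generated methods: delegate to
// DeepCopy and preserve nil-ness so the interface value stays usable.
func (in *Gadget) DeepCopyObject() Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// clone works on any Object without knowing the concrete type.
func clone(o Object) Object { return o.DeepCopyObject() }

func main() {
	var g Object = &Gadget{Name: "g"}
	fmt.Printf("%#v\n", clone(g)) // prints: &main.Gadget{Name:"g"}
}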
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork) + if in.AdditionalNetworks != nil { + in, out := &in.AdditionalNetworks, &out.AdditionalNetworks + *out = make([]AdditionalNetworkDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisableMultiNetwork != nil { + in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork + *out = new(bool) + **out = **in + } + if in.UseMultiNetworkPolicy != nil { + in, out := &in.UseMultiNetworkPolicy, &out.UseMultiNetworkPolicy + *out = new(bool) + **out = **in + } + if in.DeployKubeProxy != nil { + in, out := &in.DeployKubeProxy, &out.DeployKubeProxy + *out = new(bool) + **out = **in + } + if in.KubeProxyConfig != nil { + in, out := &in.KubeProxyConfig, &out.KubeProxyConfig + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.ExportNetworkFlows != nil { + in, out := &in.ExportNetworkFlows, &out.ExportNetworkFlows + *out = new(ExportNetworkFlows) + (*in).DeepCopyInto(*out) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + (*in).DeepCopyInto(*out) + } + if in.AdditionalRoutingCapabilities != nil { + in, out := &in.AdditionalRoutingCapabilities, &out.AdditionalRoutingCapabilities + *out = new(AdditionalRoutingCapabilities) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyClusterStatus) DeepCopyInto(out *NodeDisruptionPolicyClusterStatus) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]NodeDisruptionPolicyStatusFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]NodeDisruptionPolicyStatusUnit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.SSHKey.DeepCopyInto(&out.SSHKey) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyClusterStatus. 
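NetworkSpec above mixes nearly every idiom in the file: optional *bool fields, flat slices, and nested DeepCopyInto calls. One way to sanity-check such a copy (a test sketch under assumed names, not part of the vendored code) is to clone, compare, then mutate the clone:

package main

import (
	"fmt"
	"reflect"
)

// Spec is a hypothetical miniature of a spec with optional and slice fields.
type Spec struct {
	Disable *bool
	Ranges  []string
}

func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.Disable != nil {
		out.Disable = new(bool)
		*out.Disable = *in.Disable
	}
	if in.Ranges != nil {
		out.Ranges = make([]string, len(in.Ranges))
		copy(out.Ranges, in.Ranges)
	}
}

func main() {
	f := false
	a := Spec{Disable: &f, Ranges: []string{"10.0.0.0/8"}}
	var b Spec
	a.DeepCopyInto(&b)
	fmt.Println(reflect.DeepEqual(a, b)) // true: the copy is value-equal...
	*b.Disable = true
	b.Ranges[0] = "0.0.0.0/0"
	fmt.Println(reflect.DeepEqual(a, b)) // false: ...but shares no memory
}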
+func (in *NodeDisruptionPolicyClusterStatus) DeepCopy() *NodeDisruptionPolicyClusterStatus { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyConfig) DeepCopyInto(out *NodeDisruptionPolicyConfig) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]NodeDisruptionPolicySpecFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]NodeDisruptionPolicySpecUnit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.SSHKey.DeepCopyInto(&out.SSHKey) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyConfig. +func (in *NodeDisruptionPolicyConfig) DeepCopy() *NodeDisruptionPolicyConfig { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicySpecAction) DeepCopyInto(out *NodeDisruptionPolicySpecAction) { + *out = *in + if in.Reload != nil { + in, out := &in.Reload, &out.Reload + *out = new(ReloadService) + **out = **in + } + if in.Restart != nil { + in, out := &in.Restart, &out.Restart + *out = new(RestartService) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecAction. +func (in *NodeDisruptionPolicySpecAction) DeepCopy() *NodeDisruptionPolicySpecAction { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicySpecFile) DeepCopyInto(out *NodeDisruptionPolicySpecFile) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicySpecAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecFile. +func (in *NodeDisruptionPolicySpecFile) DeepCopy() *NodeDisruptionPolicySpecFile { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicySpecSSHKey) DeepCopyInto(out *NodeDisruptionPolicySpecSSHKey) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicySpecAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecSSHKey. +func (in *NodeDisruptionPolicySpecSSHKey) DeepCopy() *NodeDisruptionPolicySpecSSHKey { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecSSHKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicySpecUnit) DeepCopyInto(out *NodeDisruptionPolicySpecUnit) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicySpecAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecUnit. +func (in *NodeDisruptionPolicySpecUnit) DeepCopy() *NodeDisruptionPolicySpecUnit { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicySpecUnit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatus) DeepCopyInto(out *NodeDisruptionPolicyStatus) { + *out = *in + in.ClusterPolicies.DeepCopyInto(&out.ClusterPolicies) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatus. +func (in *NodeDisruptionPolicyStatus) DeepCopy() *NodeDisruptionPolicyStatus { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusAction) DeepCopyInto(out *NodeDisruptionPolicyStatusAction) { + *out = *in + if in.Reload != nil { + in, out := &in.Reload, &out.Reload + *out = new(ReloadService) + **out = **in + } + if in.Restart != nil { + in, out := &in.Restart, &out.Restart + *out = new(RestartService) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusAction. +func (in *NodeDisruptionPolicyStatusAction) DeepCopy() *NodeDisruptionPolicyStatusAction { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusFile) DeepCopyInto(out *NodeDisruptionPolicyStatusFile) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicyStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusFile. +func (in *NodeDisruptionPolicyStatusFile) DeepCopy() *NodeDisruptionPolicyStatusFile { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusSSHKey) DeepCopyInto(out *NodeDisruptionPolicyStatusSSHKey) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicyStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusSSHKey.
+func (in *NodeDisruptionPolicyStatusSSHKey) DeepCopy() *NodeDisruptionPolicyStatusSSHKey { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusSSHKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDisruptionPolicyStatusUnit) DeepCopyInto(out *NodeDisruptionPolicyStatusUnit) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]NodeDisruptionPolicyStatusAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusUnit. +func (in *NodeDisruptionPolicyStatusUnit) DeepCopy() *NodeDisruptionPolicyStatusUnit { + if in == nil { + return nil + } + out := new(NodeDisruptionPolicyStatusUnit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy. +func (in *NodePortStrategy) DeepCopy() *NodePortStrategy { + if in == nil { + return nil + } + out := new(NodePortStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.LastFailedTime != nil { + in, out := &in.LastFailedTime, &out.LastFailedTime + *out = (*in).DeepCopy() + } + if in.LastFailedRevisionErrors != nil { + in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAPIServerStatus) DeepCopyInto(out *OAuthAPIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAPIServerStatus. 
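NodeStatus above copies its *metav1.Time field with *out = (*in).DeepCopy(), leaning on the field type's own pointer-returning DeepCopy rather than allocating in place. A sketch with a hypothetical Stamp type standing in for metav1.Time:

package main

import "fmt"

// Stamp stands in for metav1.Time: a type whose own DeepCopy returns a pointer.
type Stamp struct{ Unix int64 }

func (in *Stamp) DeepCopy() *Stamp {
	if in == nil {
		return nil
	}
	out := *in
	return &out
}

// Status mirrors the NodeStatus field above in miniature.
type Status struct{ LastFailedTime *Stamp }

func (in *Status) DeepCopyInto(out *Status) {
	*out = *in
	if in.LastFailedTime != nil {
		out.LastFailedTime = in.LastFailedTime.DeepCopy() // the *out = (*in).DeepCopy() idiom
	}
}

func main() {
	a := Status{LastFailedTime: &Stamp{Unix: 1}}
	var b Status
	a.DeepCopyInto(&b)
	b.LastFailedTime.Unix = 2
	fmt.Println(a.LastFailedTime.Unix, b.LastFailedTime.Unix) // prints: 1 2
}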
+func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { + if in == nil { + return nil + } + out := new(OAuthAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLM) DeepCopyInto(out *OLM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM. +func (in *OLM) DeepCopy() *OLM { + if in == nil { + return nil + } + out := new(OLM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMList) DeepCopyInto(out *OLMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OLM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList. +func (in *OLMList) DeepCopy() *OLMList { + if in == nil { + return nil + } + out := new(OLMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMSpec) DeepCopyInto(out *OLMSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec. +func (in *OLMSpec) DeepCopy() *OLMSpec { + if in == nil { + return nil + } + out := new(OLMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMStatus) DeepCopyInto(out *OLMStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus. +func (in *OLMStatus) DeepCopy() *OLMStatus { + if in == nil { + return nil + } + out := new(OLMStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.GenevePort != nil { + in, out := &in.GenevePort, &out.GenevePort + *out = new(uint32) + **out = **in + } + if in.HybridOverlayConfig != nil { + in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig + *out = new(HybridOverlayConfig) + (*in).DeepCopyInto(*out) + } + if in.IPsecConfig != nil { + in, out := &in.IPsecConfig, &out.IPsecConfig + *out = new(IPsecConfig) + **out = **in + } + if in.PolicyAuditConfig != nil { + in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig + *out = new(PolicyAuditConfig) + (*in).DeepCopyInto(*out) + } + if in.GatewayConfig != nil { + in, out := &in.GatewayConfig, &out.GatewayConfig + *out = new(GatewayConfig) + **out = **in + } + in.EgressIPConfig.DeepCopyInto(&out.EgressIPConfig) + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(IPv4OVNKubernetesConfig) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(IPv6OVNKubernetesConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer. +func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer { + if in == nil { + return nil + } + out := new(OpenShiftAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList. +func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
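OVNKubernetesConfig above is a good index of when the generator emits **out = **in versus (*in).DeepCopyInto(*out): the short form appears only when the pointee (IPsecConfig here, as generated) needs no deep copy of its own. A contrast sketch with hypothetical Flat and Nested types:

package main

import "fmt"

// Flat has no reference fields, so copying the pointee by value is a deep copy.
type Flat struct{ Mode string }

// Nested holds a slice, so it needs its own DeepCopyInto.
type Nested struct{ Exclude []string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	if in.Exclude != nil {
		out.Exclude = make([]string, len(in.Exclude))
		copy(out.Exclude, in.Exclude)
	}
}

// Cfg is a hypothetical config mixing both kinds of pointer fields.
type Cfg struct {
	F *Flat
	N *Nested
}

func (in *Cfg) DeepCopyInto(out *Cfg) {
	*out = *in
	if in.F != nil {
		out.F = new(Flat)
		*out.F = *in.F // the **out = **in form: pointee is flat
	}
	if in.N != nil {
		out.N = new(Nested)
		in.N.DeepCopyInto(out.N) // the (*in).DeepCopyInto(*out) form
	}
}

func main() {
	a := Cfg{F: &Flat{Mode: "shared"}, N: &Nested{Exclude: []string{"eth0"}}}
	var b Cfg
	a.DeepCopyInto(&b)
	b.N.Exclude[0] = "eth1"
	fmt.Println(a.N.Exclude[0], b.N.Exclude[0]) // prints: eth0 eth1
}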
+func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec. +func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus. +func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager. +func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager { + if in == nil { + return nil + } + out := new(OpenShiftControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList. +func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec. 
+func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus. +func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { + *out = *in + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.UseExternalOpenvswitch != nil { + in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch + *out = new(bool) + **out = **in + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. +func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { + if in == nil { + return nil + } + out := new(OpenShiftSDNConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackLoadBalancerParameters) DeepCopyInto(out *OpenStackLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackLoadBalancerParameters. +func (in *OpenStackLoadBalancerParameters) DeepCopy() *OpenStackLoadBalancerParameters { + if in == nil { + return nil + } + out := new(OpenStackLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. 
+func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialSelector) DeepCopyInto(out *PartialSelector) { + *out = *in + if in.MachineResourceSelector != nil { + in, out := &in.MachineResourceSelector, &out.MachineResourceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialSelector. +func (in *PartialSelector) DeepCopy() *PartialSelector { + if in == nil { + return nil + } + out := new(PartialSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Perspective) DeepCopyInto(out *Perspective) { + *out = *in + in.Visibility.DeepCopyInto(&out.Visibility) + if in.PinnedResources != nil { + in, out := &in.PinnedResources, &out.PinnedResources + *out = new([]PinnedResourceReference) + if **in != nil { + in, out := *in, *out + *out = make([]PinnedResourceReference, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Perspective. +func (in *Perspective) DeepCopy() *Perspective { + if in == nil { + return nil + } + out := new(Perspective) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerspectiveVisibility) DeepCopyInto(out *PerspectiveVisibility) { + *out = *in + if in.AccessReview != nil { + in, out := &in.AccessReview, &out.AccessReview + *out = new(ResourceAttributesAccessReview) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerspectiveVisibility. +func (in *PerspectiveVisibility) DeepCopy() *PerspectiveVisibility { + if in == nil { + return nil + } + out := new(PerspectiveVisibility) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PinnedResourceReference) DeepCopyInto(out *PinnedResourceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PinnedResourceReference. 
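Perspective above carries a pointer to a slice (*[]PinnedResourceReference), which lets the API distinguish "unset" from "set to empty"; the generated copy therefore allocates both a new outer pointer and, when the inner slice is non-nil, a new backing array. In miniature (View is a hypothetical type):

package main

import "fmt"

type View struct {
	Pinned *[]string
}

func (in *View) DeepCopyInto(out *View) {
	*out = *in
	if in.Pinned != nil {
		out.Pinned = new([]string) // new outer pointer: "set" survives the copy
		if *in.Pinned != nil {
			*out.Pinned = make([]string, len(*in.Pinned))
			copy(*out.Pinned, *in.Pinned) // new backing array: no aliasing
		}
	}
}

func main() {
	s := []string{"deployments"}
	a := View{Pinned: &s}
	var b View
	a.DeepCopyInto(&b)
	(*b.Pinned)[0] = "pods"
	fmt.Println((*a.Pinned)[0], (*b.Pinned)[0]) // prints: deployments pods
}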
+func (in *PinnedResourceReference) DeepCopy() *PinnedResourceReference { + if in == nil { + return nil + } + out := new(PinnedResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) { + *out = *in + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(uint32) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(uint32) + **out = **in + } + if in.MaxLogFiles != nil { + in, out := &in.MaxLogFiles, &out.MaxLogFiles + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAuditConfig. +func (in *PolicyAuditConfig) DeepCopy() *PolicyAuditConfig { + if in == nil { + return nil + } + out := new(PolicyAuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy. +func (in *PrivateStrategy) DeepCopy() *PrivateStrategy { + if in == nil { + return nil + } + out := new(PrivateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectAccess) DeepCopyInto(out *ProjectAccess) { + *out = *in + if in.AvailableClusterRoles != nil { + in, out := &in.AvailableClusterRoles, &out.AvailableClusterRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectAccess. +func (in *ProjectAccess) DeepCopy() *ProjectAccess { + if in == nil { + return nil + } + out := new(ProjectAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancerParameters) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPLoadBalancerParameters) + **out = **in + } + if in.IBM != nil { + in, out := &in.IBM, &out.IBM + *out = new(IBMLoadBalancerParameters) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackLoadBalancerParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderLoadBalancerParameters. +func (in *ProviderLoadBalancerParameters) DeepCopy() *ProviderLoadBalancerParameters { + if in == nil { + return nil + } + out := new(ProviderLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) { + { + in := &in + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList. 
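ProxyArgumentList above is a named slice type, so its methods take a value receiver: only the slice header is copied on the call, and DeepCopy returns a value rather than a pointer. A runnable analogue (Args is a hypothetical name):

package main

import "fmt"

type Args []string

// A value receiver is fine here: the method allocates a fresh backing
// array for the destination, so the copy shares no memory with in.
func (in Args) DeepCopyInto(out *Args) {
	if in != nil {
		*out = make(Args, len(in))
		copy(*out, in)
	}
}

// DeepCopy returns a value, not a pointer, since Args is itself a reference type.
func (in Args) DeepCopy() Args {
	if in == nil {
		return nil
	}
	var out Args
	in.DeepCopyInto(&out)
	return out
}

func main() {
	a := Args{"--v=2"}
	b := a.DeepCopy()
	b[0] = "--v=9"
	fmt.Println(a[0], b[0]) // prints: --v=2 --v=9
}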
+func (in ProxyArgumentList) DeepCopy() ProxyArgumentList { + if in == nil { + return nil + } + out := new(ProxyArgumentList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(map[string]ProxyArgumentList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickStarts) DeepCopyInto(out *QuickStarts) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickStarts. +func (in *QuickStarts) DeepCopy() *QuickStarts { + if in == nil { + return nil + } + out := new(QuickStarts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReloadService) DeepCopyInto(out *ReloadService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReloadService. +func (in *ReloadService) DeepCopy() *ReloadService { + if in == nil { + return nil + } + out := new(ReloadService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAttributesAccessReview) DeepCopyInto(out *ResourceAttributesAccessReview) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]authorizationv1.ResourceAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Missing != nil { + in, out := &in.Missing, &out.Missing + *out = make([]authorizationv1.ResourceAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributesAccessReview. +func (in *ResourceAttributesAccessReview) DeepCopy() *ResourceAttributesAccessReview { + if in == nil { + return nil + } + out := new(ResourceAttributesAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestartService) DeepCopyInto(out *RestartService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartService. +func (in *RestartService) DeepCopy() *RestartService { + if in == nil { + return nil + } + out := new(RestartService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy. +func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy { + if in == nil { + return nil + } + out := new(RouteAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SFlowConfig) DeepCopyInto(out *SFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFlowConfig. +func (in *SFlowConfig) DeepCopy() *SFlowConfig { + if in == nil { + return nil + } + out := new(SFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountIssuerStatus) DeepCopyInto(out *ServiceAccountIssuerStatus) { + *out = *in + if in.ExpirationTime != nil { + in, out := &in.ExpirationTime, &out.ExpirationTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIssuerStatus. +func (in *ServiceAccountIssuerStatus) DeepCopy() *ServiceAccountIssuerStatus { + if in == nil { + return nil + } + out := new(ServiceAccountIssuerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA. +func (in *ServiceCA) DeepCopy() *ServiceCA { + if in == nil { + return nil + } + out := new(ServiceCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCA) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
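The ProxyConfig copy above clones a map of named slices entry by entry, giving each value its own backing array while keeping nil values nil so the unset/empty distinction survives a round trip. The same logic as a standalone helper (ArgList and deepCopyArgs are hypothetical names):

package main

import "fmt"

type ArgList []string

func deepCopyArgs(in map[string]ArgList) map[string]ArgList {
	if in == nil {
		return nil
	}
	out := make(map[string]ArgList, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil // keep nil values nil: unset != empty
			continue
		}
		cp := make(ArgList, len(val))
		copy(cp, val) // fresh backing array per entry
		out[key] = cp
	}
	return out
}

func main() {
	a := map[string]ArgList{"metrics-bind-address": {"0.0.0.0:10249"}, "unset": nil}
	b := deepCopyArgs(a)
	b["metrics-bind-address"][0] = "127.0.0.1:10249"
	fmt.Println(a["metrics-bind-address"][0]) // prints: 0.0.0.0:10249 — original untouched
}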
+func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCA, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList. +func (in *ServiceCAList) DeepCopy() *ServiceCAList { + if in == nil { + return nil + } + out := new(ServiceCAList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCAList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec. +func (in *ServiceCASpec) DeepCopy() *ServiceCASpec { + if in == nil { + return nil + } + out := new(ServiceCASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus. +func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus { + if in == nil { + return nil + } + out := new(ServiceCAStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer. +func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList. 
+func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec. +func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus. +func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager. +func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList. 
+func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec. +func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus. +func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) { + *out = *in + if in.IPAMConfig != nil { + in, out := &in.IPAMConfig, &out.IPAMConfig + *out = new(IPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig. +func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig { + if in == nil { + return nil + } + out := new(SimpleMacvlanConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses. +func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses { + if in == nil { + return nil + } + out := new(StaticIPAMAddresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]StaticIPAMAddresses, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]StaticIPAMRoutes, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(StaticIPAMDNS) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig. +func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig { + if in == nil { + return nil + } + out := new(StaticIPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS. +func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS { + if in == nil { + return nil + } + out := new(StaticIPAMDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes. +func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes { + if in == nil { + return nil + } + out := new(StaticIPAMRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec. +func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec { + if in == nil { + return nil + } + out := new(StaticPodOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. +func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { + if in == nil { + return nil + } + out := new(StaticPodOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider. +func (in *StatuspageProvider) DeepCopy() *StatuspageProvider { + if in == nil { + return nil + } + out := new(StatuspageProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. +func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyslogLoggingDestinationParameters) DeepCopyInto(out *SyslogLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogLoggingDestinationParameters. 
+func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(SyslogLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Upstream) DeepCopyInto(out *Upstream) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Upstream. +func (in *Upstream) DeepCopy() *Upstream { + if in == nil { + return nil + } + out := new(Upstream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpstreamResolvers) DeepCopyInto(out *UpstreamResolvers) { + *out = *in + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]Upstream, len(*in)) + copy(*out, *in) + } + in.TransportConfig.DeepCopyInto(&out.TransportConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamResolvers. +func (in *UpstreamResolvers) DeepCopy() *UpstreamResolvers { + if in == nil { + return nil + } + out := new(UpstreamResolvers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereCSIDriverConfigSpec) DeepCopyInto(out *VSphereCSIDriverConfigSpec) { + *out = *in + if in.TopologyCategories != nil { + in, out := &in.TopologyCategories, &out.TopologyCategories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GlobalMaxSnapshotsPerBlockVolume != nil { + in, out := &in.GlobalMaxSnapshotsPerBlockVolume, &out.GlobalMaxSnapshotsPerBlockVolume + *out = new(uint32) + **out = **in + } + if in.GranularMaxSnapshotsPerBlockVolumeInVSAN != nil { + in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVSAN, &out.GranularMaxSnapshotsPerBlockVolumeInVSAN + *out = new(uint32) + **out = **in + } + if in.GranularMaxSnapshotsPerBlockVolumeInVVOL != nil { + in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVVOL, &out.GranularMaxSnapshotsPerBlockVolumeInVVOL + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereCSIDriverConfigSpec. 
+func (in *VSphereCSIDriverConfigSpec) DeepCopy() *VSphereCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(VSphereCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..d45d8ac3004ff --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,457 @@ +authentications.operator.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: authentications.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: authentication + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: Authentication + Labels: {} + PluralName: authentications + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +csisnapshotcontrollers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/562 + CRDName: csisnapshotcontrollers.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: csi-snapshot-controller + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: operator.openshift.io + HasStatus: true + KindName: CSISnapshotController + Labels: {} + PluralName: csisnapshotcontrollers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +cloudcredentials.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/692 + CRDName: cloudcredentials.operator.openshift.io + Capability: CloudCredential + Category: "" + FeatureGates: [] + FilenameOperatorName: cloud-credential + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_40" + GroupName: operator.openshift.io + HasStatus: true + KindName: CloudCredential + Labels: {} + PluralName: cloudcredentials + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +clustercsidrivers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/701 + CRDName: clustercsidrivers.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AWSEFSDriverVolumeMetrics + - VSphereDriverConfiguration + FilenameOperatorName: csi-driver + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: ClusterCSIDriver + Labels: {} + PluralName: clustercsidrivers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +configs.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/612 + CRDName: configs.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: Config + Labels: {} + PluralName: configs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +consoles.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: 
https://github.com/openshift/api/pull/486 + CRDName: consoles.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: console + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: Console + Labels: {} + PluralName: consoles + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +dnses.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: dnses.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: dns + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_70" + GroupName: operator.openshift.io + HasStatus: true + KindName: DNS + Labels: {} + PluralName: dnses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +etcds.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/752 + CRDName: etcds.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: + - EtcdBackendQuota + - HardwareSpeed + FilenameOperatorName: etcd + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_12" + GroupName: operator.openshift.io + HasStatus: true + KindName: Etcd + Labels: {} + PluralName: etcds + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +ingresscontrollers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/616 + CRDName: ingresscontrollers.operator.openshift.io + Capability: Ingress + Category: "" + FeatureGates: + - IngressControllerLBSubnetsAWS + - SetEIPForNLBIngressController + FilenameOperatorName: ingress + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: IngressController + Labels: {} + PluralName: ingresscontrollers + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +insightsoperators.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1237 + CRDName: insightsoperators.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: insights + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: InsightsOperator + Labels: {} + PluralName: insightsoperators + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubeapiservers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: kubeapiservers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: kube-apiserver + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_20" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeAPIServer + Labels: {} + PluralName: kubeapiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubecontrollermanagers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: kubecontrollermanagers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: kube-controller-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_25" + 
GroupName: operator.openshift.io + HasStatus: true + KindName: KubeControllerManager + Labels: {} + PluralName: kubecontrollermanagers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubeschedulers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: kubeschedulers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: kube-scheduler + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_25" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeScheduler + Labels: {} + PluralName: kubeschedulers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +kubestorageversionmigrators.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/503 + CRDName: kubestorageversionmigrators.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: kube-storage-version-migrator + FilenameOperatorOrdering: "00" + FilenameRunLevel: "0000_40" + GroupName: operator.openshift.io + HasStatus: true + KindName: KubeStorageVersionMigrator + Labels: {} + PluralName: kubestorageversionmigrators + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +machineconfigurations.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1453 + CRDName: machineconfigurations.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ManagedBootImages + - NodeDisruptionPolicy + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: operator.openshift.io + HasStatus: true + KindName: MachineConfiguration + Labels: {} + PluralName: machineconfigurations + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +networks.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: networks.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AdditionalRoutingCapabilities + - NetworkLiveMigration + - RouteAdvertisements + FilenameOperatorName: network + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_70" + GroupName: operator.openshift.io + HasStatus: true + KindName: Network + Labels: {} + PluralName: networks + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +olms.operator.openshift.io: + Annotations: + include.release.openshift.io/ibm-cloud-managed: "false" + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1504 + CRDName: olms.operator.openshift.io + Capability: OperatorLifecycleManagerV1 + Category: "" + FeatureGates: + - NewOLM + FilenameOperatorName: operator-lifecycle-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: OLM + Labels: {} + PluralName: olms + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - NewOLM + Version: v1 + +openshiftapiservers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: openshiftapiservers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: 
openshift-apiserver + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_30" + GroupName: operator.openshift.io + HasStatus: true + KindName: OpenShiftAPIServer + Labels: {} + PluralName: openshiftapiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +openshiftcontrollermanagers.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: openshiftcontrollermanagers.operator.openshift.io + Capability: "" + Category: coreoperators + FeatureGates: [] + FilenameOperatorName: openshift-controller-manager + FilenameOperatorOrdering: "02" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: OpenShiftControllerManager + Labels: {} + PluralName: openshiftcontrollermanagers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +servicecas.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/475 + CRDName: servicecas.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: service-ca + FilenameOperatorOrdering: "02" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: ServiceCA + Labels: {} + PluralName: servicecas + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +storages.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/670 + CRDName: storages.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: storage + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_50" + GroupName: operator.openshift.io + HasStatus: true + KindName: Storage + Labels: {} + PluralName: storages + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..dace7ed4f2916 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,2077 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_GenerationStatus = map[string]string{ + "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", + "hash": "hash is an optional field set for resources without generation that are content-sensitive like secrets and configmaps", +} + +func (GenerationStatus) SwaggerDoc() map[string]string { + return map_GenerationStatus +} + +var map_MyOperatorResource = map[string]string{ + "": "MyOperatorResource is an example operator configuration type\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (MyOperatorResource) SwaggerDoc() map[string]string { + return map_MyOperatorResource +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "nodeName": "nodeName is the name of the node", + "currentRevision": "currentRevision is the generation of the most recently successful deployment", + "targetRevision": "targetRevision is the generation of the deployment we're trying to apply", + "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", + "lastFailedTime": "lastFailedTime is the time when the last failed revision last failed.", + "lastFailedReason": "lastFailedReason is a machine-readable failure reason string.", + "lastFailedCount": "lastFailedCount is how often the installer pod of the last failed revision failed.", + "lastFallbackCount": "lastFallbackCount is how often a fallback to a previous revision happened.", + "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of human-readable errors during the failed deployment referenced in lastFailedRevision.", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_OperatorCondition = map[string]string{ + "": "OperatorCondition is just the standard condition fields.", + "type": "type of condition in CamelCase or in foo.example.com/CamelCase.", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", +} + +func (OperatorCondition) SwaggerDoc() map[string]string { + return map_OperatorCondition +} + +var map_OperatorSpec = map[string]string{ + "": "OperatorSpec contains common fields operators need. 
It is intended to be anonymously included inside the Spec struct for your particular operator.", + "managementState": "managementState indicates whether and how the operator should manage the component", + "logLevel": "logLevel is an intent-based logging for an overall component. It does not give fine-grained control, but it is a simple way to manage coarse-grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "operatorLogLevel": "operatorLogLevel is an intent-based logging for the operator itself. It does not give fine-grained control, but it is a simple way to manage coarse-grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "unsupportedConfigOverrides": "unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from Red Hat support before using this field. Use of this property blocks cluster upgrades; it must be removed before upgrading your cluster.", + "observedConfig": "observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator", +} + +func (OperatorSpec) SwaggerDoc() map[string]string { + return map_OperatorSpec +} + +var map_OperatorStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "version": "version is the level this availability applies to", + "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", + "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment", + "generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.", +} + +func (OperatorStatus) SwaggerDoc() map[string]string { + return map_OperatorStatus +} + +var map_StaticPodOperatorSpec = map[string]string{ + "": "StaticPodOperatorSpec is the spec for controllers that manage static pods.", + "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the API. -1 = unlimited, 0 or unset = 5 (default)", + "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the API. -1 = unlimited, 0 or unset = 5 (default)", +} + +func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { + return map_StaticPodOperatorSpec +} + +var map_StaticPodOperatorStatus = map[string]string{ + "": "StaticPodOperatorStatus is the status for controllers that manage static pods. 
+There are different needs because individual node status must be tracked.", + "latestAvailableRevisionReason": "latestAvailableRevisionReason describes the detailed reason for the most recent deployment", + "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", +} + +func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { + return map_StaticPodOperatorStatus +} + +var map_Authentication = map[string]string{ + "": "Authentication provides information to configure an operator to manage authentication.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationList = map[string]string{ + "": "AuthenticationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (AuthenticationList) SwaggerDoc() map[string]string { + return map_AuthenticationList +} + +var map_AuthenticationStatus = map[string]string{ + "oauthAPIServer": "oauthAPIServer holds status specific only to oauth-apiserver", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_OAuthAPIServerStatus = map[string]string{ + "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", +} + +func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { + return map_OAuthAPIServerStatus +} + +var map_CloudCredential = map[string]string{ + "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (CloudCredential) SwaggerDoc() map[string]string { + return map_CloudCredential +} + +var map_CloudCredentialList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (CloudCredentialList) SwaggerDoc() map[string]string { + return map_CloudCredentialList +} + +var map_CloudCredentialSpec = map[string]string{ + "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.", + "credentialsMode": "credentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. 
Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", +} + +func (CloudCredentialSpec) SwaggerDoc() map[string]string { + return map_CloudCredentialSpec +} + +var map_CloudCredentialStatus = map[string]string{ + "": "CloudCredentialStatus defines the observed status of the cloud-credential-operator.", +} + +func (CloudCredentialStatus) SwaggerDoc() map[string]string { + return map_CloudCredentialStatus +} + +var map_Config = map[string]string{ + "": "Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud-based clusters\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Config Operator.", + "status": "status defines the observed status of the Config Operator.", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_ConfigList = map[string]string{ + "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (ConfigList) SwaggerDoc() map[string]string { + return map_ConfigList +} + +var map_AddPage = map[string]string{ + "": "AddPage allows customizing actions on the Add page in the developer perspective.", + "disabledActions": "disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID.", +} + +func (AddPage) SwaggerDoc() map[string]string { + return map_AddPage +} + +var map_Capability = map[string]string{ + "": "Capabilities contains a set of UI capabilities and their state in the console UI.", + "name": "name is the unique name of a capability. Available capabilities are LightspeedButton and GettingStartedBanner.", + "visibility": "visibility defines the visibility state of the capability.", +} + +func (Capability) SwaggerDoc() map[string]string { + return map_Capability +} + +var map_CapabilityVisibility = map[string]string{ + "": "CapabilityVisibility defines the criteria to enable/disable a capability.", + "state": "state defines if the capability is enabled or disabled in the console UI. Enabling the capability in the console UI is represented by the \"Enabled\" value. 
Disabling the capability in the console UI is represented by the \"Disabled\" value.", +} + +func (CapabilityVisibility) SwaggerDoc() map[string]string { + return map_CapabilityVisibility +} + +var map_Console = map[string]string{ + "": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleConfigRoute = map[string]string{ + "": "ConsoleConfigRoute holds information on external route access to the console. DEPRECATED", + "hostname": "hostname is the desired custom domain under which the console will be available.", + "secret": "secret points to a secret in the openshift-config namespace that contains the custom certificate and key and needs to be created manually by the cluster admin. The referenced Secret is required to contain the following key value pairs: - \"tls.crt\" - to specify the custom certificate - \"tls.key\" - to specify the private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ConsoleConfigRoute) SwaggerDoc() map[string]string { + return map_ConsoleConfigRoute +} + +var map_ConsoleCustomization = map[string]string{ + "": "ConsoleCustomization defines a list of optional configuration for the console UI.", + "capabilities": "capabilities defines an array of capabilities that can be interacted with in the console UI. Each capability defines a visual state that controls how the console renders it in the UI. Available capabilities are LightspeedButton and GettingStartedBanner. Each of the available capabilities may appear only once in the list.", + "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. An invalid value will prevent a console rollout.", + "documentationBaseURL": "documentationBaseURL links to the external documentation that is shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. An invalid value will prevent a console rollout.", + "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", + "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. 
Recommended logo specifications: max height of 68px and max width of 200px; SVG format preferred", + "developerCatalog": "developerCatalog allows configuring the shown developer catalog categories (filters) and types (sub-catalogs).", + "projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.", + "quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in the console.", + "addPage": "addPage allows customizing actions on the Add page in the developer perspective.", + "perspectives": "perspectives allows enabling/disabling of perspective(s) that a user can see in the Perspective switcher dropdown.", +} + +func (ConsoleCustomization) SwaggerDoc() map[string]string { + return map_ConsoleCustomization +} + +var map_ConsoleList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleList) SwaggerDoc() map[string]string { + return map_ConsoleList +} + +var map_ConsoleProviders = map[string]string{ + "": "ConsoleProviders defines a list of optional additional providers of functionality to the console.", + "statuspage": "statuspage contains the ID of the statuspage.io page that provides status info.", +} + +func (ConsoleProviders) SwaggerDoc() map[string]string { + return map_ConsoleProviders +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", + "customization": "customization is used to optionally provide a small set of customization options to the web console.", + "providers": "providers contains configuration for using specific service providers.", + "route": "route contains the hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which the console will be available. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. If the custom hostname points to an arbitrary domain, manual DNS configuration steps are necessary. The default console route will be maintained to reserve the default hostname for the console if the custom route is removed. If not specified, the default route will be used. DEPRECATED", + "plugins": "plugins defines a list of enabled console plugin names.", + "ingress": "ingress allows configuring an alternative ingress for the console. 
This field is intended for clusters without ingress capability, where access to routes is not possible.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DeveloperConsoleCatalogCategory = map[string]string{ + "": "DeveloperConsoleCatalogCategory for the developer console catalog.", + "subcategories": "subcategories defines a list of child categories.", +} + +func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategory +} + +var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ + "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", + "id": "id is an identifier used in the URL to enable deep linking in the console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "label": "label defines a category display label. It is required and must have 1-64 characters.", + "tags": "tags is a list of strings that will match the category. A selected category shows all items which have at least one overlapping tag with the category.", +} + +func (DeveloperConsoleCatalogCategoryMeta) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategoryMeta +} + +var map_DeveloperConsoleCatalogCustomization = map[string]string{ + "": "DeveloperConsoleCatalogCustomization allows cluster admins to configure the developer catalog.", + "categories": "categories which are shown in the developer catalog.", + "types": "types allows enabling or disabling of sub-catalog types that users can see in the Developer catalog. When omitted, all the sub-catalog types will be shown.", +} + +func (DeveloperConsoleCatalogCustomization) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCustomization +} + +var map_DeveloperConsoleCatalogTypes = map[string]string{ + "": "DeveloperConsoleCatalogTypes defines the state of the sub-catalog types.", + "state": "state defines if a list of catalog types should be enabled or disabled.", + "enabled": "enabled is a list of developer catalog types (sub-catalog IDs) that will be shown to users. Types (sub-catalogs) are added via console plugins; the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is non-empty, a new type will not be shown to the user until it is added to the list. If the list is empty the complete developer catalog will be shown.", + "disabled": "disabled is a list of developer catalog types (sub-catalog IDs) that are not shown to users. Types (sub-catalogs) are added via console plugins; the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. 
Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.", +} + +func (DeveloperConsoleCatalogTypes) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogTypes +} + +var map_Ingress = map[string]string{ + "": "Ingress allows cluster admin to configure alternative ingress for the console.", + "consoleURL": "consoleURL is a URL to be used as the base console address. If not specified, the console route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. Make sure that appropriate ingress is set up at this URL. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. Must use the HTTPS scheme.", + "clientDownloadsURL": "clientDownloadsURL is a URL to be used as the address to download client binaries. If not specified, the downloads route hostname will be used. This field is required for clusters without ingress capability, where access to routes is not possible. The console operator will monitor the URL and may go degraded if it's unreachable for an extended period. Must use the HTTPS scheme.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_Perspective = map[string]string{ + "": "Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown", + "id": "id defines the id of the perspective. Example: \"dev\", \"admin\". The available perspective ids can be found in the code snippet section next to the yaml editor. Incorrect or unknown ids will be ignored.", + "visibility": "visibility defines the state of perspective along with access review checks if needed for that perspective.", + "pinnedResources": "pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. The list of available Kubernetes resources could be read via `kubectl api-resources`. The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation. Incorrect or unknown resources will be ignored.", +} + +func (Perspective) SwaggerDoc() map[string]string { + return map_Perspective +} + +var map_PerspectiveVisibility = map[string]string{ + "": "PerspectiveVisibility defines the criteria to show/hide a perspective", + "state": "state defines the perspective is enabled or disabled or access review check is required.", + "accessReview": "accessReview defines required and missing access review checks.", +} + +func (PerspectiveVisibility) SwaggerDoc() map[string]string { + return map_PerspectiveVisibility +} + +var map_PinnedResourceReference = map[string]string{ + "": "PinnedResourceReference includes the group, version and type of resource", + "group": "group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: \"\", \"apps\", \"build.openshift.io\", etc.", + "version": "version is the API Version of the Resource. This value should consist of only lowercase alphanumeric characters. Example: \"v1\", \"v1beta1\", etc.", + "resource": "resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. 
This value should consist of only lowercase alphanumeric characters and hyphens. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", +} + +func (PinnedResourceReference) SwaggerDoc() map[string]string { + return map_PinnedResourceReference +} + +var map_ProjectAccess = map[string]string{ + "": "ProjectAccess contains options for project access roles.", + "availableClusterRoles": "availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab.", +} + +func (ProjectAccess) SwaggerDoc() map[string]string { + return map_ProjectAccess +} + +var map_QuickStarts = map[string]string{ + "": "QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.", + "disabled": "disabled is a list of ConsoleQuickStart resource names that are not shown to users.", +} + +func (QuickStarts) SwaggerDoc() map[string]string { + return map_QuickStarts +} + +var map_ResourceAttributesAccessReview = map[string]string{ + "": "ResourceAttributesAccessReview defines the visibility of the perspective depending on the access review checks. `required` and `missing` can work together, especially in the case where the cluster admin wants to show another perspective to users without specific permissions. Of `required` and `missing`, at least one property should be non-empty.", + "required": "required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list.", + "missing": "missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list.", +} + +func (ResourceAttributesAccessReview) SwaggerDoc() map[string]string { + return map_ResourceAttributesAccessReview +} + +var map_StatuspageProvider = map[string]string{ + "": "StatuspageProvider provides identity for a statuspage account.", + "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.", +} + +func (StatuspageProvider) SwaggerDoc() map[string]string { + return map_StatuspageProvider +} + +var map_AWSCSIDriverConfigSpec = map[string]string{ + "": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.", + "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.", + "efsVolumeMetrics": "efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver.", +} + +func (AWSCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_AWSCSIDriverConfigSpec +} + +var map_AWSEFSVolumeMetrics = map[string]string{ + "": "AWSEFSVolumeMetrics defines the configuration for volume metrics in the EFS CSI Driver.", + "state": "state defines the state of metric collection in the AWS EFS CSI Driver. This field is required and must be set to one of the following values: Disabled or RecursiveWalk. Disabled means no metrics collection will be performed. This is the default value. RecursiveWalk means the AWS EFS CSI Driver will recursively scan volumes to collect metrics. 
This process may result in high CPU and memory usage, depending on the volume size.", + "recursiveWalk": "recursiveWalk provides additional configuration for collecting volume metrics in the AWS EFS CSI Driver when the state is set to RecursiveWalk.", +} + +func (AWSEFSVolumeMetrics) SwaggerDoc() map[string]string { + return map_AWSEFSVolumeMetrics +} + +var map_AWSEFSVolumeMetricsRecursiveWalkConfig = map[string]string{ + "": "AWSEFSVolumeMetricsRecursiveWalkConfig defines options for volume metrics in the EFS CSI Driver.", + "refreshPeriodMinutes": "refreshPeriodMinutes specifies the frequency, in minutes, at which volume metrics are refreshed. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 240. The valid range is from 1 to 43200 minutes (30 days).", + "fsRateLimit": "fsRateLimit defines the rate limit, in goroutines per file system, for processing volume metrics. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 5. The valid range is from 1 to 100 goroutines.", +} + +func (AWSEFSVolumeMetricsRecursiveWalkConfig) SwaggerDoc() map[string]string { + return map_AWSEFSVolumeMetricsRecursiveWalkConfig +} + +var map_AzureCSIDriverConfigSpec = map[string]string{ + "": "AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.", + "diskEncryptionSet": "diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys.", +} + +func (AzureCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_AzureCSIDriverConfigSpec +} + +var map_AzureDiskEncryptionSet = map[string]string{ + "": "AzureDiskEncryptionSet defines the configuration for a disk encryption set.", + "subscriptionID": "subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378", + "resourceGroup": "resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumeric characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length.", + "name": "name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumeric characters, underscores (_), hyphens, and be at most 80 characters in length.", +} + +func (AzureDiskEncryptionSet) SwaggerDoc() map[string]string { + return map_AzureDiskEncryptionSet +} + +var map_CSIDriverConfigSpec = map[string]string{ + "": "CSIDriverConfigSpec defines configuration spec that can be used to optionally configure a specific CSI Driver.", + "driverType": "driverType indicates the type of CSI driver to which the driverConfig is being applied. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. 
Consumers should treat unknown values as a NO-OP.", + "aws": "aws is used to configure the AWS CSI driver.", + "azure": "azure is used to configure the Azure CSI driver.", + "gcp": "gcp is used to configure the GCP CSI driver.", + "ibmcloud": "ibmcloud is used to configure the IBM Cloud CSI driver.", + "vSphere": "vSphere is used to configure the vsphere CSI driver.", +} + +func (CSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_CSIDriverConfigSpec +} + +var map_ClusterCSIDriver = map[string]string{ + "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. The name of the object must be the name of the CSI driver it operates. See the CSIDriverName type for the list of allowed values.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ClusterCSIDriver) SwaggerDoc() map[string]string { + return map_ClusterCSIDriver +} + +var map_ClusterCSIDriverList = map[string]string{ + "": "ClusterCSIDriverList contains a list of ClusterCSIDriver\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ClusterCSIDriverList) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverList +} + +var map_ClusterCSIDriverSpec = map[string]string{ + "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator", + "storageClassState": "storageClassState determines if the CSI operator should create and manage storage classes. If this field value is empty or Managed - the CSI operator will continuously reconcile the storage class and create it if necessary. If this field value is Unmanaged - the CSI operator will not reconcile any previously created storage class. If this field value is Removed - the CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", + "driverConfig": "driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. 
These defaults are subject to change over time.", +} + +func (ClusterCSIDriverSpec) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverSpec +} + +var map_ClusterCSIDriverStatus = map[string]string{ + "": "ClusterCSIDriverStatus is the observed status of CSI driver operator", +} + +func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverStatus +} + +var map_GCPCSIDriverConfigSpec = map[string]string{ + "": "GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.", + "kmsKey": "kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP.", +} + +func (GCPCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_GCPCSIDriverConfigSpec +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", + "name": "name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "keyRing": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited.", + "location": "location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or \"global\". Defaults to global, if not set.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + +var map_IBMCloudCSIDriverConfigSpec = map[string]string{ + "": "IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.", + "encryptionKeyCRN": "encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes.", +} + +func (IBMCloudCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_IBMCloudCSIDriverConfigSpec +} + +var map_VSphereCSIDriverConfigSpec = map[string]string{ + "": "VSphereCSIDriverConfigSpec defines properties that can be configured for the vsphere CSI driver.", + "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged. If the cluster Infrastructure object has a topology, values specified in the Infrastructure object will be used and modifications to topologyCategories will be rejected.", + "globalMaxSnapshotsPerBlockVolume": "globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of datastores. If omitted, the platform chooses a default, which is subject to change over time; currently that default is 3. Snapshots cannot be disabled using this parameter. 
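The four GCPKMSKeyReference fields above jointly identify a key that GCP addresses by a single resource path of the conventional form projects/<projectID>/locations/<location>/keyRings/<keyRing>/cryptoKeys/<name>. A sketch, with kmsKeyPath as a hypothetical helper rather than vendored code:

package main

import "fmt"

func kmsKeyPath(projectID, location, keyRing, name string) string {
	if location == "" {
		location = "global" // per the doc above, location defaults to "global" when unset
	}
	return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s",
		projectID, location, keyRing, name)
}

func main() {
	fmt.Println(kmsKeyPath("my-project", "", "my-ring", "my-key"))
}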
Increasing the number of snapshots above 3 can have a negative impact on performance; for more details see: https://kb.vmware.com/s/article/1025279 Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html", + "granularMaxSnapshotsPerBlockVolumeInVSAN": "granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VSAN cannot be disabled using this parameter.", + "granularMaxSnapshotsPerBlockVolumeInVVOL": "granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VVOL cannot be disabled using this parameter.", +} + +func (VSphereCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_VSphereCSIDriverConfigSpec +} + +var map_CSISnapshotController = map[string]string{ + "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (CSISnapshotController) SwaggerDoc() map[string]string { + return map_CSISnapshotController +} + +var map_CSISnapshotControllerList = map[string]string{ + "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (CSISnapshotControllerList) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerList +} + +var map_CSISnapshotControllerSpec = map[string]string{ + "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerSpec +} + +var map_CSISnapshotControllerStatus = map[string]string{ + "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerStatus +} + +var map_DNS = map[string]string{ + "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
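The granular vSAN/VVOL snapshot limits above override the global limit when set and fall back to it (current default 3) when unset. A minimal sketch of that fallback, with effectiveSnapshotLimit as a hypothetical helper:

package main

import "fmt"

func effectiveSnapshotLimit(global, granular *int) int {
	if granular != nil {
		return *granular // granular value overrides the global constraint
	}
	if global != nil {
		return *global
	}
	return 3 // current platform default per the doc above, subject to change
}

func main() {
	vsan := 5
	fmt.Println(effectiveSnapshotLimit(nil, &vsan)) // 5
	fmt.Println(effectiveSnapshotLimit(nil, nil))   // 3
}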
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the DNS.", + "status": "status is the most recently observed status of the DNS.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSCache = map[string]string{ + "": "DNSCache defines the fields for configuring DNS caching.", + "positiveTTL": "positiveTTL is optional and specifies the amount of time that a positive response should be cached.\n\nIf configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject to change.", + "negativeTTL": "negativeTTL is optional and specifies the amount of time that a negative response should be cached.\n\nIf configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 30 seconds is subject to change.", +} + +func (DNSCache) SwaggerDoc() map[string]string { + return map_DNSCache +} + +var map_DNSList = map[string]string{ + "": "DNSList contains a list of DNS\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSNodePlacement = map[string]string{ + "": "DNSNodePlacement describes the node scheduling configuration for DNS pods.", + "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nIf empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable.\n\nNote that the daemon controller adds some tolerations as well. 
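The DNSCache TTLs above accept Go-style duration strings and round fractional seconds down to the nearest second. A sketch of that stated rule (effectiveTTLSeconds is a hypothetical helper, not the operator's parsing code):

package main

import (
	"fmt"
	"time"
)

func effectiveTTLSeconds(s string) (int64, error) {
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, err
	}
	return int64(d / time.Second), nil // integer division truncates, i.e. rounds down
}

func main() {
	for _, s := range []string{"100s", "1m30s", "12h30m10s", "1.9s"} {
		secs, _ := effectiveTTLSeconds(s)
		fmt.Printf("%s -> %ds\n", s, secs) // "1.9s" rounds down to 1s
	}
}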
See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", +} + +func (DNSNodePlacement) SwaggerDoc() map[string]string { + return map_DNSNodePlacement +} + +var map_DNSOverTLSConfig = map[string]string{ + "": "DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured.", + "serverName": "serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to \"TLS\". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s).", + "caBundle": "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers.\n\n1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName.", +} + +func (DNSOverTLSConfig) SwaggerDoc() map[string]string { + return map_DNSOverTLSConfig +} + +var map_DNSSpec = map[string]string{ + "": "DNSSpec is the specification of the desired behavior of the DNS.", + "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", + "upstreamResolvers": "upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (\".\") server\n\nIf this field is not specified, the upstream used will default to /etc/resolv.conf, with policy \"sequential\"", + "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", + "managementState": "managementState indicates whether the DNS operator should manage cluster DNS", + "operatorLogLevel": "operatorLogLevel controls the logging level of the DNS Operator. Valid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\". Setting operatorLogLevel: Trace will produce extremely verbose logs.", + "logLevel": "logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. 
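The servers field above routes a query to the Server whose Zone is the longest matching suffix of the query name. A sketch of that rule using the doc's own foo.com / a.foo.com example (pickZone is a hypothetical helper):

package main

import (
	"fmt"
	"strings"
)

func pickZone(name string, zones []string) string {
	best := ""
	for _, z := range zones {
		// a zone matches if it equals the name or is a dot-separated suffix of it
		if (name == z || strings.HasSuffix(name, "."+z)) && len(z) > len(best) {
			best = z
		}
	}
	return best
}

func main() {
	fmt.Println(pickZone("www.a.foo.com", []string{"foo.com", "a.foo.com"})) // a.foo.com
}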
* Trace logs errors and all responses.\n Setting logLevel: Trace will produce extremely verbose logs.\nValid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\".", + "cache": "cache describes the caching configuration that applies to all server blocks listed in the Corefile. This field allows a cluster admin to optionally configure: * positiveTTL which is a duration for which positive responses should be cached. * negativeTTL which is a duration for which negative responses should be cached. If this is not configured, OpenShift will configure positive and negative caching with a default value that is subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is 30 seconds or as noted in the respective Corefile for your version of OpenShift.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSStatus = map[string]string{ + "": "DNSStatus defines the observed status of the DNS.", + "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @<service IP>\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service", + "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.", +} + +func (DNSStatus) SwaggerDoc() map[string]string { + return map_DNSStatus +} + +var map_DNSTransportConfig = map[string]string{ + "": "DNSTransportConfig groups related configuration parameters used for configuring forwarding to upstream resolvers that support DNS-over-TLS.", + "transport": "transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s).\n\nPossible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. 
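The transport doc above states that choosing "TLS" requires a ServerName. A minimal validation sketch of just that rule (validateTransport is a hypothetical helper, not the vendored validation code):

package main

import (
	"errors"
	"fmt"
)

func validateTransport(transport, serverName string) error {
	switch transport {
	case "", "Cleartext":
		return nil // cleartext needs no server name
	case "TLS":
		if serverName == "" {
			return errors.New("serverName is required when transport is TLS")
		}
		return nil
	default:
		return fmt.Errorf("unknown transport %q", transport)
	}
}

func main() {
	fmt.Println(validateTransport("TLS", ""))            // error
	fmt.Println(validateTransport("TLS", "dns.example")) // <nil>
}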
If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1.", + "tls": "tls contains the additional configuration options to use when Transport is set to \"TLS\".", +} + +func (DNSTransportConfig) SwaggerDoc() map[string]string { + return map_DNSTransportConfig +} + +var map_ForwardPlugin = map[string]string{ + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", + "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", +} + +func (ForwardPlugin) SwaggerDoc() map[string]string { + return map_ForwardPlugin +} + +var map_Server = map[string]string{ + "": "Server defines the schema for a server that runs per instance of CoreDNS.", + "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.", + "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. 
Specifying the cluster domain (i.e., \"cluster.local\") is invalid.", + "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.", +} + +func (Server) SwaggerDoc() map[string]string { + return map_Server +} + +var map_Upstream = map[string]string{ + "": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n - For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n - For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.", + "type": "type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", + "address": "address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", + "port": "port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 1 and 65535", +} + +func (Upstream) SwaggerDoc() map[string]string { + return map_Upstream +} + +var map_UpstreamResolvers = map[string]string{ + "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It differs from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential", + "upstreams": "upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", + "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. 
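Per the Upstream docs above, an upstream is written as a bare IP, or IP:port when it listens on a port other than 53. A sketch (upstreamString is a hypothetical helper; net.JoinHostPort handles IPv6 bracketing):

package main

import (
	"fmt"
	"net"
	"strconv"
)

func upstreamString(address string, port int) string {
	if port == 0 || port == 53 {
		return address // default DNS port: bare IP form
	}
	return net.JoinHostPort(address, strconv.Itoa(port))
}

func main() {
	fmt.Println(upstreamString("10.0.0.10", 0))      // 10.0.0.10
	fmt.Println(upstreamString("10.0.0.10", 5353))   // 10.0.0.10:5353
	fmt.Println(upstreamString("2001:db8::1", 5353)) // [2001:db8::1]:5353
}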
\"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", +} + +func (UpstreamResolvers) SwaggerDoc() map[string]string { + return map_UpstreamResolvers +} + +var map_Etcd = map[string]string{ + "": "Etcd provides information to configure an operator to manage etcd.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Etcd) SwaggerDoc() map[string]string { + return map_Etcd +} + +var map_EtcdList = map[string]string{ + "": "KubeAPISOperatorConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (EtcdList) SwaggerDoc() map[string]string { + return map_EtcdList +} + +var map_EtcdSpec = map[string]string{ + "controlPlaneHardwareSpeed": "HardwareSpeed allows user to change the etcd tuning profile which configures the latency parameters for heartbeat interval and leader election timeouts allowing the cluster to tolerate longer round-trip-times between etcd members. Valid values are \"\", \"Standard\" and \"Slower\".\n\t\"\" means no opinion and the platform is left to choose a reasonable default\n\twhich is subject to change without notice.", + "backendQuotaGiB": "backendQuotaGiB sets the etcd backend storage size limit in gibibytes. The value should be an integer not less than 8 and not more than 32. When not specified, the default value is 8.", +} + +func (EtcdSpec) SwaggerDoc() map[string]string { + return map_EtcdSpec +} + +var map_AWSClassicLoadBalancerParameters = map[string]string{ + "": "AWSClassicLoadBalancerParameters holds configuration parameters for an AWS Classic load balancer.", + "connectionIdleTimeout": "connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change.", + "subnets": "subnets specifies the subnets to which the load balancer will attach. The subnets may be specified by either their ID or name. The total number of subnets is limited to 10.\n\nIn order for the load balancer to be provisioned with subnets, each subnet must exist, each subnet must be from a different availability zone, and the load balancer service must be recreated to pick up new values.\n\nWhen omitted from the spec, the subnets will be auto-discovered for each availability zone. 
Auto-discovered subnets are not reported in the status of the IngressController object.", +} + +func (AWSClassicLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSClassicLoadBalancerParameters +} + +var map_AWSLoadBalancerParameters = map[string]string{ + "": "AWSLoadBalancerParameters provides configuration settings that are specific to AWS load balancers.", + "type": "type is the type of AWS load balancer to instantiate for an ingresscontroller.\n\nValid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", + "classicLoadBalancer": "classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic.", + "networkLoadBalancer": "networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB.", +} + +func (AWSLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSLoadBalancerParameters +} + +var map_AWSNetworkLoadBalancerParameters = map[string]string{ + "": "AWSNetworkLoadBalancerParameters holds configuration parameters for an AWS Network load balancer. For example: setting AWS EIPs https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html", + "subnets": "subnets specifies the subnets to which the load balancer will attach. The subnets may be specified by either their ID or name. The total number of subnets is limited to 10.\n\nIn order for the load balancer to be provisioned with subnets, each subnet must exist, each subnet must be from a different availability zone, and the load balancer service must be recreated to pick up new values.\n\nWhen omitted from the spec, the subnets will be auto-discovered for each availability zone. Auto-discovered subnets are not reported in the status of the IngressController object.", + "eipAllocations": "eipAllocations is a list of IDs for Elastic IP (EIP) addresses that are assigned to the Network Load Balancer. The following restrictions apply:\n\neipAllocations can only be used with external scope, not internal. An EIP can be allocated to only a single IngressController. The number of EIP allocations must match the number of subnets that are used for the load balancer. Each EIP allocation must be unique. A maximum of 10 EIP allocations are permitted.\n\nSee https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html for general information about configuration, characteristics, and limitations of Elastic IP addresses.", +} + +func (AWSNetworkLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSNetworkLoadBalancerParameters +} + +var map_AWSSubnets = map[string]string{ + "": "AWSSubnets contains a list of references to AWS subnets by ID or name.", + "ids": "ids specifies a list of AWS subnets by subnet ID. 
Subnet IDs must start with \"subnet-\", consist only of alphanumeric characters, must be exactly 24 characters long, must be unique, and the total number of subnets specified by ids and names must not exceed 10.", + "names": "names specifies a list of AWS subnets by subnet name. Subnet names must not start with \"subnet-\", must not include commas, must be under 256 characters in length, must be unique, and the total number of subnets specified by ids and names must not exceed 10.", +} + +func (AWSSubnets) SwaggerDoc() map[string]string { + return map_AWSSubnets +} + +var map_AccessLogging = map[string]string{ + "": "AccessLogging describes how client requests should be logged.", + "destination": "destination is where access logs go.", + "httpLogFormat": "httpLogFormat specifies the format of the log message for an HTTP request.\n\nIf this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3\n\nNote that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections.", + "httpCaptureHeaders": "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured.\n\nNote that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections.", + "httpCaptureCookies": "httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured.", + "logEmptyRequests": "logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are \"Log\" and \"Ignore\". The default value is \"Log\".", +} + +func (AccessLogging) SwaggerDoc() map[string]string { + return map_AccessLogging +} + +var map_ClientTLS = map[string]string{ + "": "ClientTLS specifies TLS configuration to enable client-to-server authentication, which can be used for mutual TLS.", + "clientCertificatePolicy": "clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values \"Required\" or \"Optional\".\n\nNote that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes.", + "clientCA": "clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. 
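A validation sketch for the AWSSubnets constraints above: IDs are "subnet-" plus alphanumeric characters for a total of exactly 24 characters; names must not start with "subnet-", must not contain commas, and must be under 256 characters. Hypothetical helpers, not the vendored validation:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// "subnet-" is 7 characters, so 17 alphanumeric characters make 24 in total.
var subnetIDRe = regexp.MustCompile(`^subnet-[0-9A-Za-z]{17}$`)

func validSubnetID(id string) bool { return subnetIDRe.MatchString(id) }

func validSubnetName(name string) bool {
	return name != "" &&
		!strings.HasPrefix(name, "subnet-") &&
		!strings.Contains(name, ",") &&
		len(name) < 256
}

func main() {
	fmt.Println(validSubnetID("subnet-0123456789abcdef0")) // true
	fmt.Println(validSubnetName("my-private-subnet"))      // true
}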
The administrator must create this configmap in the openshift-config namespace.", + "allowedSubjectPatterns": "allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection.", +} + +func (ClientTLS) SwaggerDoc() map[string]string { + return map_ClientTLS +} + +var map_ContainerLoggingDestinationParameters = map[string]string{ + "": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.", + "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 8192, inclusive.\n\nWhen omitted, the default value is 1024.", +} + +func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_ContainerLoggingDestinationParameters +} + +var map_EndpointPublishingStrategy = map[string]string{ + "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", + "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will be preserved.", + "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.", + "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", + "private": "private holds parameters for the Private endpoint publishing strategy. 
Present only if type is Private.", + "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.", +} + +func (EndpointPublishingStrategy) SwaggerDoc() map[string]string { + return map_EndpointPublishingStrategy +} + +var map_GCPLoadBalancerParameters = map[string]string{ + "": "GCPLoadBalancerParameters provides configuration settings that are specific to GCP load balancers.", + "clientAccess": "clientAccess describes how client access is restricted for internal load balancers.\n\nValid values are: * \"Global\": Specifying an internal load balancer with Global client access\n allows clients from any region within the VPC to communicate with the load\n balancer.\n\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access\n\n* \"Local\": Specifying an internal load balancer with Local client access\n means only clients within the same region (and VPC) as the GCP load balancer\n can communicate with the load balancer. Note that this is the default behavior.\n\n https://cloud.google.com/load-balancing/docs/internal#client_access", +} + +func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_GCPLoadBalancerParameters +} + +var map_HTTPCompressionPolicy = map[string]string{ + "": "httpCompressionPolicy turns on compression for the specified MIME types.\n\nThis field is optional, and its absence implies that compression should not be enabled globally in HAProxy.\n\nIf httpCompressionPolicy exists, compression should be enabled only for the specified MIME types.", + "mimeTypes": "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression.\n\nNote: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2", +} + +func (HTTPCompressionPolicy) SwaggerDoc() map[string]string { + return map_HTTPCompressionPolicy +} + +var map_HostNetworkStrategy = map[string]string{ + "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. 
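Per the mimeTypes doc above, compression applies only to response types in the configured list, and an empty list means no compression. A sketch (shouldCompress is a hypothetical helper, not HAProxy's matching logic):

package main

import "fmt"

func shouldCompress(contentType string, mimeTypes []string) bool {
	for _, mt := range mimeTypes {
		if mt == contentType {
			return true
		}
	}
	return false // empty list: the ingress controller applies no compression
}

func main() {
	cfg := []string{"text/html", "text/css", "application/javascript"}
	fmt.Println(shouldCompress("text/html", cfg)) // true
	fmt.Println(shouldCompress("image/png", cfg)) // false: already-compressed format, not listed
	fmt.Println(shouldCompress("text/html", nil)) // false: compression disabled
}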
Note that the default is subject to change.", + "httpPort": "httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80.", + "httpsPort": "httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443.", + "statsPort": "statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936.", +} + +func (HostNetworkStrategy) SwaggerDoc() map[string]string { + return map_HostNetworkStrategy +} + +var map_IBMLoadBalancerParameters = map[string]string{ + "": "IBMLoadBalancerParameters provides configuration settings that are specific to IBM Cloud load balancers.", + "protocol": "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nValid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled.", +} + +func (IBMLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_IBMLoadBalancerParameters +} + +var map_IngressController = map[string]string{ + "": "IngressController describes a managed ingress controller for the cluster. 
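The statsPort doc above quotes well-tested external health-check values: HTTP probes of /healthz/ready on the stats port (default 1936), every 5 to 10 seconds, a 5-second timeout, and a threshold of two. A sketch recording those values in a hypothetical config struct (the struct itself is an illustration, not a real API):

package main

import "fmt"

type healthCheck struct {
	Path               string
	Port               int
	IntervalSeconds    int
	TimeoutSeconds     int
	HealthyThreshold   int
	UnhealthyThreshold int
}

func main() {
	// Values mirror the well-tested settings quoted in the statsPort doc above.
	hc := healthCheck{
		Path:               "/healthz/ready",
		Port:               1936, // default statsPort
		IntervalSeconds:    5,
		TimeoutSeconds:     5,
		HealthyThreshold:   2,
		UnhealthyThreshold: 2,
	}
	fmt.Printf("%+v\n", hc)
}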
The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the IngressController.", + "status": "status is the most recently observed status of the IngressController.", +} + +func (IngressController) SwaggerDoc() map[string]string { + return map_IngressController +} + +var map_IngressControllerCaptureHTTPCookie = map[string]string{ + "": "IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured.", + "maxLength": "maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPCookie) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookie +} + +var map_IngressControllerCaptureHTTPCookieUnion = map[string]string{ + "": "IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.", + "matchType": "matchType specifies the type of match to be performed on the cookie name. Allowed values are \"Exact\" for an exact string match and \"Prefix\" for a string prefix match. If \"Exact\" is specified, a name must be specified in the name field. If \"Prefix\" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType \"Prefix\" and namePrefix \"foo\" will capture a cookie named \"foo\" or \"foobar\" but not one named \"bar\". The first matching cookie is captured.", + "name": "name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", + "namePrefix": "namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", +} + +func (IngressControllerCaptureHTTPCookieUnion) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookieUnion +} + +var map_IngressControllerCaptureHTTPHeader = map[string]string{ + "": "IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured.", + "name": "name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2.", + "maxLength": "maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. 
Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeader +} + +var map_IngressControllerCaptureHTTPHeaders = map[string]string{ + "": "IngressControllerCaptureHTTPHeaders specifies which HTTP headers the IngressController captures.", + "request": "request specifies which HTTP request headers to capture.\n\nIf this field is empty, no request headers are captured.", + "response": "response specifies which HTTP response headers to capture.\n\nIf this field is empty, no response headers are captured.", +} + +func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeaders +} + +var map_IngressControllerHTTPHeader = map[string]string{ + "": "IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header.", + "name": "name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, \"-!#$%&'*+.^_`\". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.", + "action": "action specifies actions to perform on headers, such as setting or deleting headers.", +} + +func (IngressControllerHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeader +} + +var map_IngressControllerHTTPHeaderActionUnion = map[string]string{ + "": "IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header.", + "type": "type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers.", + "set": "set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise.", +} + +func (IngressControllerHTTPHeaderActionUnion) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaderActionUnion +} + +var map_IngressControllerHTTPHeaderActions = map[string]string{ + "": "IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.", + "response": "response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes, i.e., for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may either `Set` or `Delete` header values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are \"res.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[res.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\".", + "request": "request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes, i.e., 
for all connections handled by the ingress controller defined within a cluster. IngressController actions for request headers will be executed before Route actions. Currently, actions may either `Set` or `Delete` header values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are \"req.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[req.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". ", +} + +func (IngressControllerHTTPHeaderActions) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaderActions +} + +var map_IngressControllerHTTPHeaders = map[string]string{ + "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.", + "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".", + "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.", + "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.", + "actions": "actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the \"haproxy.router.openshift.io/hsts_header\" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. 
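Both header-action lists above reserve the same five header names. A minimal sketch of that check (actionAllowed is a hypothetical helper, not the vendored admission logic):

package main

import (
	"fmt"
	"strings"
)

// The reserved names listed in the docs above; HTTP header names are
// case-insensitive, so compare in lower case.
var reservedHeaders = map[string]bool{
	"strict-transport-security": true,
	"proxy":                     true,
	"host":                      true,
	"cookie":                    true,
	"set-cookie":                true,
}

func actionAllowed(headerName string) bool {
	return !reservedHeaders[strings.ToLower(headerName)]
}

func main() {
	fmt.Println(actionAllowed("X-Frame-Options")) // true
	fmt.Println(actionAllowed("Set-Cookie"))      // false
}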
Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.", +} + +func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaders +} + +var map_IngressControllerHTTPUniqueIdHeaderPolicy = map[string]string{ + "": "IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a unique id header.", + "name": "name specifies the name of the HTTP header (for example, \"unique-id\") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. If the field is empty, no header is injected.", + "format": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3", +} + +func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPUniqueIdHeaderPolicy +} + +var map_IngressControllerList = map[string]string{ + "": "IngressControllerList contains a list of IngressControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (IngressControllerList) SwaggerDoc() map[string]string { + return map_IngressControllerList +} + +var map_IngressControllerLogging = map[string]string{ + "": "IngressControllerLogging describes what should be logged where.", + "access": "access describes how the client requests should be logged.\n\nIf this field is empty, access logging is disabled.", +} + +func (IngressControllerLogging) SwaggerDoc() map[string]string { + return map_IngressControllerLogging +} + +var map_IngressControllerSetHTTPHeader = map[string]string{ + "": "IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header.", + "value": "value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController.", +} + +func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerSetHTTPHeader +}
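As an illustrative aside (our addition): every type in this generated file follows the same convention, visible throughout the diff above, in which SwaggerDoc returns a map whose empty key holds the type-level description and whose remaining keys describe individual fields. A minimal, self-contained sketch of a consumer of that convention, using a stand-in type rather than the real API types:

package main

import "fmt"

// exampleType stands in for any of the API types in this file; the map layout
// below mirrors the generated convention (empty key = type doc, other keys =
// field docs).
type exampleType struct{}

var map_exampleType = map[string]string{
	"":      "exampleType documents the SwaggerDoc convention.",
	"field": "field is a sample field description.",
}

func (exampleType) SwaggerDoc() map[string]string { return map_exampleType }

func main() {
	docs := exampleType{}.SwaggerDoc()
	fmt.Println("type doc:", docs[""])
	for k, v := range docs {
		if k != "" {
			fmt.Printf("field %q: %s\n", k, v)
		}
	}
}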
 + +var map_IngressControllerSpec = map[string]string{ + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-<error code>.http\", where <error code> is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. See, e.g., https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http. If this field is empty, the ingress controller uses the default error pages.", + "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based on whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", + "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", + "logging": "logging defines parameters for what should be logged where. 
If this field is empty, operational logs are enabled but access logs are disabled.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", + "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", + "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", + "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", +} + +func (IngressControllerSpec) SwaggerDoc() map[string]string { + return map_IngressControllerSpec +}
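As an illustrative aside (our addition): the replicas documentation above describes a two-step defaulting rule, picking a topology field based on defaultPlacement and then mapping SingleReplica to 1 and HighlyAvailable to 2. A hedged sketch of that rule; the function name and signature are our own, and the real defaulting lives in the operator rather than this API package:

package main

import "fmt"

// defaultReplicas sketches the documented defaulting for spec.replicas:
// defaultPlacement selects which Infrastructure topology field is consulted,
// and SingleReplica yields 1 replica while HighlyAvailable yields 2.
func defaultReplicas(defaultPlacement, controlPlaneTopology, infrastructureTopology string) int32 {
	topology := infrastructureTopology
	if defaultPlacement == "ControlPlane" {
		topology = controlPlaneTopology
	}
	if topology == "SingleReplica" {
		return 1
	}
	return 2 // HighlyAvailable
}

func main() {
	fmt.Println(defaultReplicas("Workers", "HighlyAvailable", "SingleReplica")) // 1
}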
 + +var map_IngressControllerStatus = map[string]string{ + "": "IngressControllerStatus defines the observed status of the IngressController.", + "availableReplicas": "availableReplicas is the number of observed available replicas according to the ingress controller deployment.", + "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas.", + "domain": "domain is the actual domain in use.", + "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", + "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e., .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", + "observedGeneration": "observedGeneration is the most recent generation observed.", + "namespaceSelector": "namespaceSelector is the actual namespaceSelector in use.", + "routeSelector": "routeSelector is the actual routeSelector in use.", +} + +func (IngressControllerStatus) SwaggerDoc() map[string]string { + return map_IngressControllerStatus +}
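As an illustrative aside (our addition): the IngressControllerTuningOptions documentation that follows describes how headerBufferBytes and headerBufferMaxRewriteBytes interact, namely that incoming requests are limited to their difference and that the former must exceed the latter. A hedged sketch of that arithmetic under the documented defaults (32768 and 8192 bytes); the helper name is our own:

package main

import (
	"errors"
	"fmt"
)

// requestHeaderBudget sketches the documented relationship: incoming HTTP
// requests are limited to (headerBufferBytes - headerBufferMaxRewriteBytes)
// bytes, so headerBufferBytes must be strictly greater than
// headerBufferMaxRewriteBytes. Zero values fall back to the documented
// defaults of 32768 and 8192.
func requestHeaderBudget(headerBufferBytes, headerBufferMaxRewriteBytes int32) (int32, error) {
	if headerBufferBytes == 0 {
		headerBufferBytes = 32768
	}
	if headerBufferMaxRewriteBytes == 0 {
		headerBufferMaxRewriteBytes = 8192
	}
	if headerBufferBytes <= headerBufferMaxRewriteBytes {
		return 0, errors.New("headerBufferBytes must be greater than headerBufferMaxRewriteBytes")
	}
	return headerBufferBytes - headerBufferMaxRewriteBytes, nil
}

func main() {
	budget, _ := requestHeaderBudget(0, 0)
	fmt.Println(budget) // 24576 with the documented defaults
}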
 + +var map_IngressControllerTuningOptions = map[string]string{ + "": "IngressControllerTuningOptions specifies options for tuning the performance of ingress controller pods.", + "headerBufferBytes": "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes.\n\nSetting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "headerBufferMaxRewriteBytes": "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes.\n\nSetting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "threadCount": "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases.\n\nSetting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly.", + "clientTimeout": "clientTimeout defines how long a connection will be held open while waiting for a client response.\n\nIf unset, the default timeout is 30s", + "clientFinTimeout": "clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection.\n\nIf unset, the default timeout is 1s", + "serverTimeout": "serverTimeout defines how long a connection will be held open while waiting for a server/backend response.\n\nIf unset, the default timeout is 30s", + "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", + "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", + "connectTimeout": "connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.", + "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", + "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Conversely, setting this too high can result in increased latency due to backend servers that are no longer available but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. 
Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", + "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that the new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.", + "reloadInterval": "reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly.\n\nThe value must be a time duration value; see https://pkg.go.dev/time#ParseDuration. Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default).\n\nA zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". 
Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nNote: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload.", +} + +func (IngressControllerTuningOptions) SwaggerDoc() map[string]string { + return map_IngressControllerTuningOptions +}
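As an illustrative aside (our addition): the duration fields above use the same unit grammar as Go's time.ParseDuration, and reloadInterval is documented as being floored to 1s or capped to 120s rather than rejected, with zero selecting the current 5s default. A hedged sketch of that behavior; the helper is ours, not the operator's actual code:

package main

import (
	"fmt"
	"time"
)

// clampReloadInterval parses a duration string and applies the documented
// reloadInterval semantics: empty or zero selects the 5s default, values
// under 1s are floored to 1s, and values over 120s are capped to 120s.
func clampReloadInterval(s string) (time.Duration, error) {
	if s == "" {
		return 5 * time.Second, nil
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, err
	}
	switch {
	case d == 0:
		return 5 * time.Second, nil
	case d < time.Second:
		return time.Second, nil
	case d > 120*time.Second:
		return 120 * time.Second, nil
	}
	return d, nil
}

func main() {
	d, _ := clampReloadInterval("2h45m")
	fmt.Println(d) // 2m0s: capped to the documented 120s maximum
}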
 + +var map_LoadBalancerStrategy = map[string]string{ + "": "LoadBalancerStrategy holds parameters for a load balancer.", + "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".", + "allowedSourceRanges": "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses.\n\nTo facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12.", + "providerParameters": "providerParameters holds desired load balancer information specific to the underlying infrastructure provider.\n\nIf empty, defaults will be applied. See specific providerParameters fields for details about their defaults.", + "dnsManagementPolicy": "dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.", +} + +func (LoadBalancerStrategy) SwaggerDoc() map[string]string { + return map_LoadBalancerStrategy +} + +var map_LoggingDestination = map[string]string{ + "": "LoggingDestination describes a destination for log messages.", + "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.", + "syslog": "syslog holds parameters for a syslog endpoint. Present only if type is Syslog.", + "container": "container holds parameters for the Container logging destination. Present only if type is Container.", +} + +func (LoggingDestination) SwaggerDoc() map[string]string { + return map_LoggingDestination +} + +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for an ingress controller.", + "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf set, the specified selector is used and replaces the default.\n\nIf unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nWhen defaultPlacement is Workers, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nWhen defaultPlacement is ControlPlane, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/master: ''\n\nThese defaults are subject to change.\n\nNote that using nodeSelector.matchExpressions is not supported. Only nodeSelector.matchLabels may be used. This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors.", + "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + +var map_NodePortStrategy = map[string]string{ + "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (NodePortStrategy) SwaggerDoc() map[string]string { + return map_NodePortStrategy +} + +var map_OpenStackLoadBalancerParameters = map[string]string{ + "": "OpenStackLoadBalancerParameters provides configuration settings that are specific to OpenStack load balancers.", + "floatingIP": "floatingIP specifies the IP address that the load balancer will use. When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. When specified, the floating IP has to be pre-created. If the specified value is not a floating IP or is already claimed, the OpenStack cloud provider won't be able to provision the load balancer. This field may only be used if the IngressController has External scope. This value must be a valid IPv4 or IPv6 address. 
", +} + +func (OpenStackLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_OpenStackLoadBalancerParameters +} + +var map_PrivateStrategy = map[string]string{ + "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (PrivateStrategy) SwaggerDoc() map[string]string { + return map_PrivateStrategy +} + +var map_ProviderLoadBalancerParameters = map[string]string{ + "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", + "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"IBM\", \"Nutanix\", \"OpenStack\", and \"VSphere\".", + "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.", + "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. See specific gcp fields for details about their defaults.", + "ibm": "ibm provides configuration settings that are specific to IBM Cloud load balancers.\n\nIf empty, defaults will be applied. See specific ibm fields for details about their defaults.", + "openstack": "openstack provides configuration settings that are specific to OpenStack load balancers.\n\nIf empty, defaults will be applied. See specific openstack fields for details about their defaults.", +} + +func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_ProviderLoadBalancerParameters +} + +var map_RouteAdmissionPolicy = map[string]string{ + "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.", + "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.", + "wildcardPolicy": "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. 
WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy.\n\n[1] https://github.com/openshift/api/blob/master/route/v1/types.go\n\nNote: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller.\n\nWildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.\n\nIf empty, defaults to \"WildcardsDisallowed\".", +} + +func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { + return map_RouteAdmissionPolicy +} + +var map_SyslogLoggingDestinationParameters = map[string]string{ + "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", + "address": "address is the IP address of the syslog endpoint that receives log messages.", + "port": "port is the UDP port number of the syslog endpoint that receives log messages.", + "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", + "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 4096, inclusive.\n\nWhen omitted, the default value is 1024.", +} + +func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_SyslogLoggingDestinationParameters +}
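As an illustrative aside (our addition): the maxLength documentation above gives a closed range of 480 to 4096 with 1024 as the omitted-value default. A hedged sketch of that validation rule; the helper name is ours:

package main

import "fmt"

// validateSyslogMaxLength applies the documented constraint on maxLength:
// a zero (omitted) value defaults to 1024, and explicit values must fall in
// the inclusive range [480, 4096]. Illustrative only.
func validateSyslogMaxLength(maxLength uint32) (uint32, error) {
	if maxLength == 0 {
		return 1024, nil
	}
	if maxLength < 480 || maxLength > 4096 {
		return 0, fmt.Errorf("maxLength %d outside the allowed range [480, 4096]", maxLength)
	}
	return maxLength, nil
}

func main() {
	v, err := validateSyslogMaxLength(0)
	fmt.Println(v, err) // 1024 <nil>
}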
 + +var map_GatherStatus = map[string]string{ + "": "gatherStatus provides information about the last known gather event.", + "lastGatherTime": "lastGatherTime is the last time when Insights data gathering finished. An empty value means that no data has been gathered yet.", + "lastGatherDuration": "lastGatherDuration is the total time taken to process all gatherers during the last gather event.", + "gatherers": "gatherers is a list of active gatherers (and their statuses) in the last gathering.", +} + +func (GatherStatus) SwaggerDoc() map[string]string { + return map_GatherStatus +} + +var map_GathererStatus = map[string]string{ + "": "gathererStatus represents information about a particular data gatherer.", + "conditions": "conditions provide details on the status of each gatherer.", + "name": "name is the name of the gatherer.", + "lastGatherDuration": "lastGatherDuration represents the time spent gathering.", +} + +func (GathererStatus) SwaggerDoc() map[string]string { + return map_GathererStatus +} + +var map_HealthCheck = map[string]string{ + "": "healthCheck represents the attributes of an Insights health check.", + "description": "description provides a basic description of the healthcheck.", + "totalRisk": "totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue.", + "advisorURI": "advisorURI provides the URL link to the Insights Advisor.", + "state": "state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface.", +} + +func (HealthCheck) SwaggerDoc() map[string]string { + return map_HealthCheck +} + +var map_InsightsOperator = map[string]string{ + "": "\n\nInsightsOperator holds cluster-wide information about the Insights Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Insights.", + "status": "status is the most recently observed status of the Insights operator.", +} + +func (InsightsOperator) SwaggerDoc() map[string]string { + return map_InsightsOperator +} + +var map_InsightsOperatorList = map[string]string{ + "": "InsightsOperatorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InsightsOperatorList) SwaggerDoc() map[string]string { + return map_InsightsOperatorList +} + +var map_InsightsOperatorStatus = map[string]string{ + "gatherStatus": "gatherStatus provides basic information about the last Insights data gathering. When omitted, this means no data gathering has taken place yet.", + "insightsReport": "insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet.", +} + +func (InsightsOperatorStatus) SwaggerDoc() map[string]string { + return map_InsightsOperatorStatus +} + +var map_InsightsReport = map[string]string{ + "": "insightsReport provides Insights health check report based on the most recently sent Insights data.", + "downloadedAt": "downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled).", + "healthChecks": "healthChecks provides basic information about active Insights health checks in a cluster.", +} + +func (InsightsReport) SwaggerDoc() map[string]string { + return map_InsightsReport +} + +var map_KubeAPIServer = map[string]string{ + "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", + "status": "status is the most recently observed status of the Kubernetes API Server", +} + +func (KubeAPIServer) SwaggerDoc() map[string]string { + return map_KubeAPIServer +} + +var map_KubeAPIServerList = map[string]string{ + "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeAPIServerList) SwaggerDoc() map[string]string { + return map_KubeAPIServerList +} + +var map_KubeAPIServerStatus = map[string]string{ + "serviceAccountIssuers": "serviceAccountIssuers tracks history of used service account issuers. The item without expiration time represents the currently used service account issuer. The other items represent service account issuers that were used previously and are still being trusted. The default expiration for the items is set by the platform and it defaults to 24h. see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection", +} + +func (KubeAPIServerStatus) SwaggerDoc() map[string]string { + return map_KubeAPIServerStatus +} + +var map_ServiceAccountIssuerStatus = map[string]string{ + "name": "name is the name of the service account issuer.", + "expirationTime": "expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list of service account issuers.", +} + +func (ServiceAccountIssuerStatus) SwaggerDoc() map[string]string { + return map_ServiceAccountIssuerStatus +}
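As an illustrative aside (our addition): the serviceAccountIssuers documentation above says the item without an expiration time is the issuer currently in use, while the remaining items are previously used issuers that are still trusted. A hedged sketch of that selection rule, using a local stand-in struct rather than the real API type:

package main

import (
	"fmt"
	"time"
)

// issuerStatus loosely mirrors the two documented fields of
// ServiceAccountIssuerStatus for illustration purposes.
type issuerStatus struct {
	name           string
	expirationTime *time.Time // nil means no expiration, i.e. the current issuer
}

// currentIssuer returns the issuer without an expiration time, per the rule
// documented above. Illustrative only.
func currentIssuer(issuers []issuerStatus) (string, bool) {
	for _, iss := range issuers {
		if iss.expirationTime == nil {
			return iss.name, true
		}
	}
	return "", false
}

func main() {
	exp := time.Now().Add(24 * time.Hour) // the documented default expiration is 24h
	name, ok := currentIssuer([]issuerStatus{
		{name: "https://old.issuer.example", expirationTime: &exp},
		{name: "https://new.issuer.example"},
	})
	fmt.Println(name, ok) // https://new.issuer.example true
}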
 + +var map_KubeControllerManager = map[string]string{ + "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", + "status": "status is the most recently observed status of the Kubernetes Controller Manager", +} + +func (KubeControllerManager) SwaggerDoc() map[string]string { + return map_KubeControllerManager +} + +var map_KubeControllerManagerList = map[string]string{ + "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeControllerManagerList) SwaggerDoc() map[string]string { + return map_KubeControllerManagerList +} + +var map_KubeControllerManagerSpec = map[string]string{ + "useMoreSecureServiceCA": "useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content.", +} + +func (KubeControllerManagerSpec) SwaggerDoc() map[string]string { + return map_KubeControllerManagerSpec +} + +var map_KubeStorageVersionMigrator = map[string]string{ + "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigrator +} + +var map_KubeStorageVersionMigratorList = map[string]string{ + "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigratorList +} + +var map_MachineConfiguration = map[string]string{ + "": "MachineConfiguration provides information to configure an operator to manage Machine Configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Machine Config Operator", + "status": "status is the most recently observed status of the Machine Config Operator", +} + +func (MachineConfiguration) SwaggerDoc() map[string]string { + return map_MachineConfiguration +} + +var map_MachineConfigurationList = map[string]string{ + "": "MachineConfigurationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (MachineConfigurationList) SwaggerDoc() map[string]string { + return map_MachineConfigurationList +} + +var map_MachineConfigurationSpec = map[string]string{ + "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, no boot images will be updated.", + "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. 
This configuration has no effect on cluster upgrades which will still incur node disruption where required.", +} + +func (MachineConfigurationSpec) SwaggerDoc() map[string]string { + return map_MachineConfigurationSpec +} + +var map_MachineConfigurationStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", +} + +func (MachineConfigurationStatus) SwaggerDoc() map[string]string { + return map_MachineConfigurationStatus +} + +var map_MachineManager = map[string]string{ + "": "MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information such as the resource type and the API Group of the resource. It also provides granular control via the selection field.", + "resource": "resource is the machine management resource's type. The only current valid value is machinesets. machinesets means that the machine manager will only register resources of the kind MachineSet.", + "apiGroup": "apiGroup is the name of the APIGroup that the machine management resource belongs to. The only current valid value is machine.openshift.io. machine.openshift.io means that the machine manager will only register resources that belong to the OpenShift machine API group.", + "selection": "selection allows granular control of the machine management resources that will be registered for boot image updates.", +} + +func (MachineManager) SwaggerDoc() map[string]string { + return map_MachineManager +} + +var map_MachineManagerSelector = map[string]string{ + "mode": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated.", + "partial": "partial provides label selector(s) that can be used to match machine management resources. Only permitted when mode is set to \"Partial\".", +} + +func (MachineManagerSelector) SwaggerDoc() map[string]string { + return map_MachineManagerSelector +} + +var map_ManagedBootImages = map[string]string{ + "machineManagers": "machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator will watch for changes to this list. 
Only one entry is permitted per type of machine management resource.", +} + +func (ManagedBootImages) SwaggerDoc() map[string]string { + return map_ManagedBootImages +} + +var map_NodeDisruptionPolicyClusterStatus = map[string]string{ + "": "NodeDisruptionPolicyClusterStatus is the type for the status object, rendered by the controller as a merge of cluster defaults and user provided policies.", + "files": "files is a list of MachineConfig file definitions and actions to take on changes to those paths.", + "units": "units is a list of MachineConfig unit definitions and actions to take on changes to those services.", + "sshkey": "sshkey is the overall sshkey MachineConfig definition.", +} + +func (NodeDisruptionPolicyClusterStatus) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyClusterStatus +} + +var map_NodeDisruptionPolicyConfig = map[string]string{ + "": "NodeDisruptionPolicyConfig is the overall spec definition for files/units/sshkeys.", + "files": "files is a list of MachineConfig file definitions and actions to take on changes to those paths. This list supports a maximum of 50 entries.", + "units": "units is a list of MachineConfig unit definitions and actions to take on changes to those services. This list supports a maximum of 50 entries.", + "sshkey": "sshkey maps to the ignition.sshkeys field in the MachineConfig object; defining an action for this will apply to all sshkey changes in the cluster.", +} + +func (NodeDisruptionPolicyConfig) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyConfig +} + +var map_NodeDisruptionPolicySpecAction = map[string]string{ + "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed. Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration.", + "reload": "reload specifies the service to reload, only valid if type is reload", + "restart": "restart specifies the service to restart, only valid if type is restart", +} + +func (NodeDisruptionPolicySpecAction) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecAction +}
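As an illustrative aside (our addition): the NodeDisruptionPolicySpecAction documentation above says Reload and Restart require a corresponding service target, while the other action types need no further configuration. A hedged sketch of that rule, using a local stand-in struct rather than the real API type:

package main

import (
	"errors"
	"fmt"
)

// specAction loosely mirrors NodeDisruptionPolicySpecAction for illustration:
// an action type plus optional reload/restart service targets.
type specAction struct {
	actionType     string // Reboot, Drain, Reload, Restart, DaemonReload, or None
	reloadService  string
	restartService string
}

// validateSpecAction applies the documented rule: Reload and Restart require
// a service target; the remaining types require no further configuration.
func validateSpecAction(a specAction) error {
	switch a.actionType {
	case "Reload":
		if a.reloadService == "" {
			return errors.New("Reload requires a reload service target")
		}
	case "Restart":
		if a.restartService == "" {
			return errors.New("Restart requires a restart service target")
		}
	case "Reboot", "Drain", "DaemonReload", "None":
		// no further configuration required
	default:
		return fmt.Errorf("unknown action type %q", a.actionType)
	}
	return nil
}

func main() {
	fmt.Println(validateSpecAction(specAction{actionType: "Reload"})) // error: missing target
}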
 + +var map_NodeDisruptionPolicySpecFile = map[string]string{ + "": "NodeDisruptionPolicySpecFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object.", + "path": "path is the location of a file being managed through a MachineConfig. The Actions in the policy will apply to changes to the file at this path.", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicySpecFile) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecFile +} + +var map_NodeDisruptionPolicySpecSSHKey = map[string]string{ + "": "NodeDisruptionPolicySpecSSHKey describes the actions to take for any SSHKey change and is used in the NodeDisruptionPolicyConfig object.", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicySpecSSHKey) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecSSHKey +} + +var map_NodeDisruptionPolicySpecUnit = map[string]string{ + "": "NodeDisruptionPolicySpecUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object.", + "name": "name represents the service name of a systemd service managed through a MachineConfig. Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\\\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicySpecUnit) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicySpecUnit +} + +var map_NodeDisruptionPolicyStatus = map[string]string{ + "clusterPolicies": "clusterPolicies is a merge of cluster default and user provided node disruption policies.", +} + +func (NodeDisruptionPolicyStatus) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatus +} + +var map_NodeDisruptionPolicyStatusAction = map[string]string{ + "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed. Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. reload/restart requires a corresponding service target specified in the reload/restart field. 
Other values require no further configuration.", + "reload": "reload specifies the service to reload, only valid if type is reload", + "restart": "restart specifies the service to restart, only valid if type is restart", +} + +func (NodeDisruptionPolicyStatusAction) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusAction +} + +var map_NodeDisruptionPolicyStatusFile = map[string]string{ + "": "NodeDisruptionPolicyStatusFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object.", + "path": "path is the location of a file being managed through a MachineConfig. The Actions in the policy will apply to changes to the file at this path.", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicyStatusFile) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusFile +} + +var map_NodeDisruptionPolicyStatusSSHKey = map[string]string{ + "": "NodeDisruptionPolicyStatusSSHKey describes the actions to take for any SSHKey change and is used in the NodeDisruptionPolicyClusterStatus object.", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicyStatusSSHKey) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusSSHKey +} + +var map_NodeDisruptionPolicyStatusUnit = map[string]string{ + "": "NodeDisruptionPolicyStatusUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object.", + "name": "name represents the service name of a systemd service managed through a MachineConfig. Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\\\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", + "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. 
The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.", +} + +func (NodeDisruptionPolicyStatusUnit) SwaggerDoc() map[string]string { + return map_NodeDisruptionPolicyStatusUnit +} + +var map_PartialSelector = map[string]string{ + "": "PartialSelector provides label selector(s) that can be used to match machine management resources.", + "machineResourceSelector": "machineResourceSelector is a label selector that can be used to select machine resources like MachineSets.", +} + +func (PartialSelector) SwaggerDoc() map[string]string { + return map_PartialSelector +} + +var map_ReloadService = map[string]string{ + "": "ReloadService allows the user to specify the services to be reloaded", + "serviceName": "serviceName is the full name (e.g. crio.service) of the service to be reloaded. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", +} + +func (ReloadService) SwaggerDoc() map[string]string { + return map_ReloadService +} + +var map_RestartService = map[string]string{ + "": "RestartService allows the user to specify the services to be restarted", + "serviceName": "serviceName is the full name (e.g. crio.service) of the service to be restarted. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of letters, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".", +} + +func (RestartService) SwaggerDoc() map[string]string { + return map_RestartService +}
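The generated SwaggerDoc maps above all follow one convention: the empty-string key documents the type itself and every other key documents a single field. A minimal, self-contained consumer sketch (the map literal below is a trimmed copy of map_ReloadService above, not an import of the vendored package):

package main

import "fmt"

// docs mirrors the shape of the generated SwaggerDoc maps: "" documents
// the type, every other key documents one field.
var docs = map[string]string{
	"":            "ReloadService allows the user to specify the services to be reloaded",
	"serviceName": "serviceName is the full name (e.g. crio.service) of the service to be reloaded.",
}

func main() {
	// Look up a field's doc string, falling back to the type-level doc.
	if d, ok := docs["serviceName"]; ok {
		fmt.Println(d)
	} else {
		fmt.Println(docs[""])
	}
}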
var map_AdditionalNetworkDefinition = map[string]string{ + "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network. The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan", + "name": "name is the name of the network. This will be populated in the resulting CRD. This must be unique.", + "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD. If not given the network will be created in the default namespace.", + "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", + "simpleMacvlanConfig": "simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", +} + +func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { + return map_AdditionalNetworkDefinition +} + +var map_AdditionalRoutingCapabilities = map[string]string{ + "": "AdditionalRoutingCapabilities describes components and relevant configuration providing advanced routing capabilities.", + "providers": "providers is a set of enabled components that provide additional routing capabilities. Entries on this list must be unique. The only valid value is currently \"FRR\" which provides FRR routing capabilities through the deployment of FRR.", +} + +func (AdditionalRoutingCapabilities) SwaggerDoc() map[string]string { + return map_AdditionalRoutingCapabilities +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DefaultNetworkDefinition = map[string]string{ + "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network. All NetworkTypes are supported except for NetworkTypeRaw", + "openshiftSDNConfig": "openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", + "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.", +} + +func (DefaultNetworkDefinition) SwaggerDoc() map[string]string { + return map_DefaultNetworkDefinition +} + +var map_EgressIPConfig = map[string]string{ + "": "EgressIPConfig defines the configuration knobs for egressip", + "reachabilityTotalTimeoutSeconds": "reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check.", +} + +func (EgressIPConfig) SwaggerDoc() map[string]string { + return map_EgressIPConfig +} + +var map_ExportNetworkFlows = map[string]string{ + "netFlow": "netFlow defines the NetFlow configuration.", + "sFlow": "sFlow defines the SFlow configuration.", + "ipfix": "ipfix defines IPFIX configuration.", +} + +func (ExportNetworkFlows) SwaggerDoc() map[string]string { + return map_ExportNetworkFlows +} + +var map_FeaturesMigration = map[string]string{ + "egressIP": "egressIP specified whether or not the Egress IP configuration was migrated. DEPRECATED: network type migration is no longer supported.", + "egressFirewall": "egressFirewall specified whether or not the Egress Firewall configuration was migrated. DEPRECATED: network type migration is no longer supported.", + "multicast": "multicast specified whether or not the multicast configuration was migrated. DEPRECATED: network type migration is no longer supported.", +} + +func (FeaturesMigration) SwaggerDoc() map[string]string { + return map_FeaturesMigration +} + +var map_GatewayConfig = map[string]string{ + "": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides", + "routingViaHost": "routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out.
If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.", + "ipForwarding": "ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", + "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual member fields within ipv4 for details of default values.", + "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual member fields within ipv6 for details of default values.", +} + +func (GatewayConfig) SwaggerDoc() map[string]string { + return map_GatewayConfig +}
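routingViaHost trades OVN's direct egress path for the host's network stack, which the docs above note disables hardware offload. A minimal sketch of setting it (the operatorv1 import path and the Go field name are inferred from this vendoring and the routingViaHost JSON tag; treat both as assumptions):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1" // assumed home of these types
)

func main() {
	// Send pod egress through ovn-k8s-mp0 into the host stack; per the
	// docs above this defaults to false and turns off hardware offload.
	gw := operatorv1.GatewayConfig{RoutingViaHost: true}
	fmt.Println(gw.RoutingViaHost)
}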
var map_HybridOverlayConfig = map[string]string{ + "hybridClusterNetwork": "hybridClusterNetwork defines a network space given to nodes on an additional overlay network.", + "hybridOverlayVXLANPort": "hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", +} + +func (HybridOverlayConfig) SwaggerDoc() map[string]string { + return map_HybridOverlayConfig +} + +var map_IPAMConfig = map[string]string{ + "": "IPAMConfig contains configurations for IPAM (IP Address Management)", + "type": "type is the type of IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", + "staticIPAMConfig": "staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", +} + +func (IPAMConfig) SwaggerDoc() map[string]string { + return map_IPAMConfig +} + +var map_IPFIXConfig = map[string]string{ + "collectors": "ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items", +} + +func (IPFIXConfig) SwaggerDoc() map[string]string { + return map_IPFIXConfig +} + +var map_IPsecConfig = map[string]string{ + "mode": "mode defines the behaviour of the ipsec configuration within the platform. Valid values are `Disabled`, `External` and `Full`. When 'Disabled', ipsec will not be enabled at the node level. When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters. This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator. When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured. Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays), this is left to the user to configure.", +} + +func (IPsecConfig) SwaggerDoc() map[string]string { + return map_IPsecConfig +} + +var map_IPv4GatewayConfig = map[string]string{ + "": "IPV4GatewayConfig holds the configuration parameters for IPV4 connections in the GatewayConfig for OVN-Kubernetes", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29. The value must be in proper IPV4 CIDR format", +} + +func (IPv4GatewayConfig) SwaggerDoc() map[string]string { + return map_IPv4GatewayConfig +} + +var map_IPv4OVNKubernetesConfig = map[string]string{ + "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16. The subnet must be large enough to accommodate one IP per node in your cluster. The value must be in proper IPV4 CIDR format", + "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The current default value is 100.64.0.0/16. The subnet must be large enough to accommodate one IP per node in your cluster. The value must be in proper IPV4 CIDR format", +} + +func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string { + return map_IPv4OVNKubernetesConfig +} + +var map_IPv6GatewayConfig = map[string]string{ + "": "IPV6GatewayConfig holds the configuration parameters for IPV6 connections in the GatewayConfig for OVN-Kubernetes", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125).
When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125. Note that IPV6 dual addresses are not permitted", +} + +func (IPv6GatewayConfig) SwaggerDoc() map[string]string { + return map_IPv6GatewayConfig +} + +var map_IPv6OVNKubernetesConfig = map[string]string{ + "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v6 subnet in IPV6 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accommodate one IP per node in your cluster. The current default subnet is fd97::/64. The value must be in proper IPV6 CIDR format. Note that IPV6 dual addresses are not permitted", + "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The subnet must be large enough to accommodate one IP per node in your cluster. The current default value is fd98::/48. The value must be in proper IPV6 CIDR format. Note that IPV6 dual addresses are not permitted", +} + +func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string { + return map_IPv6OVNKubernetesConfig +} + +var map_MTUMigration = map[string]string{ + "": "MTUMigration contains information about MTU migration.", + "network": "network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset.", + "machine": "machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU.", +} + +func (MTUMigration) SwaggerDoc() map[string]string { + return map_MTUMigration +} + +var map_MTUMigrationValues = map[string]string{ + "": "MTUMigrationValues contains the values for a MTU migration.", + "to": "to is the MTU to migrate to.", + "from": "from is the MTU to migrate from.", +} + +func (MTUMigrationValues) SwaggerDoc() map[string]string { + return map_MTUMigrationValues +} + +var map_NetFlowConfig = map[string]string{ + "collectors": "netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items", +} + +func (NetFlowConfig) SwaggerDoc() map[string]string { + return map_NetFlowConfig +}
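The MTU docs above encode one invariant worth making concrete: a migrated default-network MTU must sit below the machine (uplink) MTU by at least the encapsulation overhead, which the OVN-Kubernetes mtu doc below states is 100 bytes for Geneve. A small self-contained sketch of that check, not code from this patch:

package main

import "fmt"

// validMTUMigration reports whether a target default-network MTU fits
// under the machine (uplink) MTU with room for the Geneve overhead.
func validMTUMigration(networkTo, machineTo uint32) bool {
	const geneveOverhead = 100 // bytes, per the OVN-Kubernetes mtu doc below
	return networkTo+geneveOverhead <= machineTo
}

func main() {
	fmt.Println(validMTUMigration(1400, 1500)) // true: 1400+100 <= 1500
	fmt.Println(validMTUMigration(1450, 1500)) // false: violates the offset
}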
var map_Network = map[string]string{ + "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkList = map[string]string{ + "": "NetworkList contains a list of Network configurations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the cluster network migration configuration.", + "mtu": "mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected.", + "networkType": "networkType was previously used when changing the default network type. DEPRECATED: network type migration is no longer supported, and setting this to a non-empty value will result in the network operator rejecting the configuration.", + "features": "features was previously used to configure which network plugin features would be migrated in a network type migration. DEPRECATED: network type migration is no longer supported, and setting this to a non-empty value will result in the network operator rejecting the configuration.", + "mode": "mode indicates the mode of network type migration. DEPRECATED: network type migration is no longer supported, and setting this to a non-empty value will result in the network operator rejecting the configuration.", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the top-level network configuration object.", + "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.", + "serviceNetwork": "serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth.", + "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive", + "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.", + "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + "useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.", + "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator.
Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when ovn-kubernetes is used and true otherwise.", + "disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks.", + "kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration, if deployKubeProxy is true. If not specified, sensible defaults will be chosen by OpenShift directly.", + "exportNetworkFlows": "exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector.", + "migration": "migration enables and configures cluster network migration, for network changes that cannot be made instantly.", + "additionalRoutingCapabilities": "additionalRoutingCapabilities describes components and relevant configuration providing additional routing capabilities. When set, it enables such components and the usage of the routing capabilities they provide for the machine network. Upstream operators, like MetalLB operator, requiring these capabilities may rely on, or automatically set this attribute. Network plugins may leverage advanced routing capabilities acquired through the enablement of these components but may require specific configuration on their side to do so; refer to their respective documentation and configuration options.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +}
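clusterNetwork entries pair a pod CIDR with a per-node HostPrefix, alongside the service CIDR. A self-contained mirror of that shape (hand-rolled types for illustration; the real ones are the vendored operator/v1 structs these docs describe, and the sample CIDRs are only examples):

package main

import "fmt"

// clusterNetworkEntry mirrors the documented shape: a pod CIDR plus the
// prefix length handed to each node that joins the cluster.
type clusterNetworkEntry struct {
	CIDR       string
	HostPrefix uint32
}

func main() {
	// 10.128.0.0/14 carved into /23 subnets, one per node, plus the
	// single service CIDR most providers support today.
	clusterNetwork := []clusterNetworkEntry{{CIDR: "10.128.0.0/14", HostPrefix: 23}}
	serviceNetwork := []string{"172.30.0.0/16"}
	fmt.Println(clusterNetwork, serviceNetwork)
}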
var map_OVNKubernetesConfig = map[string]string{ + "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", + "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", + "genevePort": "genevePort is the UDP port to be used by geneve encapsulation. Default is 6081", + "hybridOverlayConfig": "hybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", + "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.", + "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.", + "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.", + "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16", + "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48", + "egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.", + "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual fields within ipv4 for details of default values.", + "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual fields within ipv6 for details of default values.", + "routeAdvertisements": "routeAdvertisements determines if the functionality to advertise cluster network routes through a dynamic routing protocol, such as BGP, is enabled or not. This functionality is configured through the ovn-kubernetes RouteAdvertisements CRD. Requires the 'FRR' routing capability provider to be enabled as an additional routing capability. Allowed values are \"Enabled\", \"Disabled\" and omitted. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is \"Disabled\".", +} + +func (OVNKubernetesConfig) SwaggerDoc() map[string]string { + return map_OVNKubernetesConfig +} + +var map_OpenShiftSDNConfig = map[string]string{ + "": "OpenShiftSDNConfig was used to configure the OpenShift SDN plugin. It is no longer used.", + "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", + "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.", + "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", + "useExternalOpenvswitch": "useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored.", + "enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled.", +} + +func (OpenShiftSDNConfig) SwaggerDoc() map[string]string { + return map_OpenShiftSDNConfig +} + +var map_PolicyAuditConfig = map[string]string{ + "rateLimit": "rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used.", + "maxFileSize": "maxFileSize is the max size an ACL_audit log file is allowed to reach before rotation occurs. Units are in MB and the default is 50MB", + "maxLogFiles": "maxLogFiles specifies the maximum number of ACL_audit log files that can be present.", + "destination": "destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/; additionally, syslog output may be configured as follows. Valid values are: - \"libc\" -> to use the libc syslog() function of the host node's journald process - \"udp:host:port\" -> for sending syslog over UDP - \"unix:file\" -> for using the UNIX domain socket directly - \"null\" -> to discard all messages logged to syslog. The default is \"null\"", + "syslogFacility": "syslogFacility specifies the RFC5424 facility for generated messages, e.g. \"kern\".
Default is \"local0\"", +} + +func (PolicyAuditConfig) SwaggerDoc() map[string]string { + return map_PolicyAuditConfig +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines the configuration knobs for kubeproxy All of these are optional and have sensible defaults", + "iptablesSyncPeriod": "An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s", + "bindAddress": "The address to \"bind\" on Defaults to 0.0.0.0", + "proxyArguments": "Any additional arguments to pass to the kubeproxy process", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SFlowConfig = map[string]string{ + "collectors": "sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items", +} + +func (SFlowConfig) SwaggerDoc() map[string]string { + return map_SFlowConfig +} + +var map_SimpleMacvlanConfig = map[string]string{ + "": "SimpleMacvlanConfig contains configurations for macvlan interface.", + "master": "master is the host interface to create the macvlan interface from. If not specified, it will be default route interface", + "ipamConfig": "ipamConfig configures IPAM module will be used for IP Address Management (IPAM).", + "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge", + "mtu": "mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value.", +} + +func (SimpleMacvlanConfig) SwaggerDoc() map[string]string { + return map_SimpleMacvlanConfig +} + +var map_StaticIPAMAddresses = map[string]string{ + "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses", + "address": "address is the IP address in CIDR format", + "gateway": "gateway is IP inside of subnet to designate as the gateway", +} + +func (StaticIPAMAddresses) SwaggerDoc() map[string]string { + return map_StaticIPAMAddresses +} + +var map_StaticIPAMConfig = map[string]string{ + "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)", + "addresses": "addresses configures IP address for the interface", + "routes": "routes configures IP routes for the interface", + "dns": "dns configures DNS for the interface", +} + +func (StaticIPAMConfig) SwaggerDoc() map[string]string { + return map_StaticIPAMConfig +} + +var map_StaticIPAMDNS = map[string]string{ + "": "StaticIPAMDNS provides DNS related information for static IPAM", + "nameservers": "nameservers points DNS servers for IP lookup", + "domain": "domain configures the domainname the local domain used for short hostname lookups", + "search": "search configures priority ordered search domains for short hostname lookups", +} + +func (StaticIPAMDNS) SwaggerDoc() map[string]string { + return map_StaticIPAMDNS +} + +var map_StaticIPAMRoutes = map[string]string{ + "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes", + "destination": "destination points the IP route destination", + "gateway": "gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", +} + +func (StaticIPAMRoutes) SwaggerDoc() map[string]string { + return map_StaticIPAMRoutes +} + +var map_OLM = map[string]string{ + "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a 
var map_OLM = map[string]string{ + "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OLM) SwaggerDoc() map[string]string { + return map_OLM +} + +var map_OLMList = map[string]string{ + "": "OLMList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (OLMList) SwaggerDoc() map[string]string { + return map_OLMList +} + +var map_OpenShiftAPIServer = map[string]string{ + "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the OpenShift API Server.", + "status": "status defines the observed status of the OpenShift API Server.", +} + +func (OpenShiftAPIServer) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServer +} + +var map_OpenShiftAPIServerList = map[string]string{ + "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (OpenShiftAPIServerList) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerList +} + +var map_OpenShiftControllerManager = map[string]string{ + "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OpenShiftControllerManager) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManager +} + +var map_OpenShiftControllerManagerList = map[string]string{ + "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManagerList +} + +var map_KubeScheduler = map[string]string{ + "": "KubeScheduler provides information to configure an operator to manage the scheduler.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", + "status": "status is the most recently observed status of the Kubernetes Scheduler", +} + +func (KubeScheduler) SwaggerDoc() map[string]string { + return map_KubeScheduler +} + +var map_KubeSchedulerList = map[string]string{ + "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (KubeSchedulerList) SwaggerDoc() map[string]string { + return map_KubeSchedulerList +} + +var map_ServiceCA = map[string]string{ + "": "ServiceCA provides information to configure an operator to manage the service cert controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ServiceCA) SwaggerDoc() map[string]string { + return map_ServiceCA +} + +var map_ServiceCAList = map[string]string{ + "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (ServiceCAList) SwaggerDoc() map[string]string { + return map_ServiceCAList +} + +var map_ServiceCatalogAPIServer = map[string]string{ + "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server. DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServer +} + +var map_ServiceCatalogAPIServerList = map[string]string{ + "": "ServiceCatalogAPIServerList is a collection of items. DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServerList +} + +var map_ServiceCatalogControllerManager = map[string]string{ + "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager. DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManager +} + +var map_ServiceCatalogControllerManagerList = map[string]string{ + "": "ServiceCatalogControllerManagerList is a collection of items. DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManagerList +} + +var map_Storage = map[string]string{ + "": "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + +var map_StorageList = map[string]string{ + "": "StorageList contains a list of Storages.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (StorageList) SwaggerDoc() map[string]string { + return map_StorageList +} + +var map_StorageSpec = map[string]string{ + "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", + "vsphereStorageDriver": "vsphereStorageDriver indicates the storage driver to use on VSphere clusters.
Once this field is set to CSIWithMigrationDriver, it cannot be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.", +} + +func (StorageSpec) SwaggerDoc() map[string]string { + return map_StorageSpec +} + +var map_StorageStatus = map[string]string{ + "": "StorageStatus defines the observed status of the cluster storage operator.", +} + +func (StorageStatus) SwaggerDoc() map[string]string { + return map_StorageStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/osin/v1/doc.go b/vendor/github.com/openshift/api/osin/v1/doc.go new file mode 100644 index 0000000000000..b74dfc48ad0ef --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=osin.config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/osin/v1/register.go b/vendor/github.com/openshift/api/osin/v1/register.go new file mode 100644 index 0000000000000..4d54a5df40474 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/register.go @@ -0,0 +1,50 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "osin.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group + // DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OsinServerConfig{}, + + &BasicAuthPasswordIdentityProvider{}, + &AllowAllPasswordIdentityProvider{}, + &DenyAllPasswordIdentityProvider{}, + &HTPasswdPasswordIdentityProvider{}, + &LDAPPasswordIdentityProvider{}, + &KeystonePasswordIdentityProvider{}, + &RequestHeaderIdentityProvider{}, + &GitHubIdentityProvider{}, + &GitLabIdentityProvider{}, + &GoogleIdentityProvider{}, + &OpenIDIdentityProvider{}, + + &SessionSecrets{}, + ) + return nil +}
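register.go above is the usual scheme-builder plumbing. A minimal sketch of how a consumer installs the group and checks that a kind is registered (Install and GroupVersion are declared in the file above; the Recognizes call is standard apimachinery, and this usage is illustrative rather than part of the patch):

package main

import (
	"fmt"

	osinv1 "github.com/openshift/api/osin/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers OsinServerConfig plus every identity-provider type
	// listed in addKnownTypes (and configv1, via the scheme builder above).
	if err := osinv1.Install(scheme); err != nil {
		panic(err)
	}
	gvk := osinv1.GroupVersion.WithKind("OsinServerConfig")
	fmt.Println(scheme.Recognizes(gvk)) // true once Install has run
}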
diff --git a/vendor/github.com/openshift/api/osin/v1/types.go b/vendor/github.com/openshift/api/osin/v1/types.go new file mode 100644 index 0000000000000..35eb3ee8b0169 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/types.go @@ -0,0 +1,488 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + configv1 "github.com/openshift/api/config/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type OsinServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // oauthConfig holds the necessary configuration options for OAuth authentication + OAuthConfig OAuthConfig `json:"oauthConfig"` +} + +// OAuthConfig holds the necessary configuration options for OAuth authentication +type OAuthConfig struct { + // masterCA is the CA for verifying the TLS connection back to the MasterURL. + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterCA *string `json:"masterCA"` + + // masterURL is used for making server-to-server calls to exchange authorization codes for access tokens + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterURL string `json:"masterURL"` + + // masterPublicURL is used for building valid client redirect URLs for internal and external access + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterPublicURL string `json:"masterPublicURL"` + + // loginURL, along with masterCA, masterURL and masterPublicURL have distinct + // meanings depending on how the OAuth server is run. The two states are: + // 1. embedded in the kube api server (all 3.x releases) + // 2. as a standalone external process (all 4.x releases) + // in the embedded configuration, loginURL is equivalent to masterPublicURL + // and the other fields have functionality that matches their docs. + // in the standalone configuration, the fields are used as: + // loginURL is the URL required to login to the cluster: + // oc login --server= + // masterPublicURL is the issuer URL + // it is accessible from inside (service network) and outside (ingress) of the cluster + // masterURL is the loopback variation of the token_endpoint URL with no path component + // it is only accessible from inside (service network) of the cluster + // masterCA is used to perform TLS verification for connections made to masterURL + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + LoginURL string `json:"loginURL"` + + // assetPublicURL is used for building valid client redirect URLs for external access + AssetPublicURL string `json:"assetPublicURL"` + + // alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` + + // identityProviders is an ordered list of ways for a user to identify themselves + IdentityProviders []IdentityProvider `json:"identityProviders"` + + // grantConfig describes how to handle grants + GrantConfig GrantConfig `json:"grantConfig"` + + // sessionConfig holds information about configuring sessions. + SessionConfig *SessionConfig `json:"sessionConfig"` + + // tokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // templates allow you to customize pages like the login page.
+ Templates *OAuthTemplates `json:"templates"` +} + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // login is a path to a file containing a go template used to render the login page. + // If unspecified, the default login page is used. + Login string `json:"login"` + + // providerSelection is a path to a file containing a go template used to render the provider selection page. + // If unspecified, the default provider selection page is used. + ProviderSelection string `json:"providerSelection"` + + // error is a path to a file containing a go template used to render error pages during the authentication or grant flow. + // If unspecified, the default error page is used. + Error string `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider + Name string `json:"name"` + // challenge indicates whether to issue WWW-Authenticate challenges for this provider + UseAsChallenger bool `json:"challenge"` + // login indicates whether to use this identity provider for unauthenticated browsers to login against + UseAsLogin bool `json:"login"` + // mappingMethod determines how identities from this provider are mapped to users + MappingMethod string `json:"mappingMethod"` + // provider contains the information about how to set up a specific identity provider + // +kubebuilder:pruning:PreserveUnknownFields + Provider runtime.RawExtension `json:"provider"` +}
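Because provider is a runtime.RawExtension, a concrete provider config rides inside the IdentityProvider envelope. A sketch of wrapping the HTPasswd provider declared further below (all values are hypothetical, and the generated deepcopy funcs are assumed to be vendored alongside so the provider satisfies runtime.Object):

package main

import (
	"fmt"

	osinv1 "github.com/openshift/api/osin/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// The concrete provider travels as a RawExtension payload.
	idp := osinv1.IdentityProvider{
		Name:            "local-htpasswd", // hypothetical provider name
		UseAsChallenger: true,
		UseAsLogin:      true,
		MappingMethod:   "claim",
		Provider: runtime.RawExtension{
			Object: &osinv1.HTPasswdPasswordIdentityProvider{File: "/etc/htpasswd"},
		},
	}
	fmt.Println(idp.Name)
}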
+ +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type BasicAuthPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // RemoteConnectionInfo contains information about how to connect to the external basic auth server + configv1.RemoteConnectionInfo `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type AllowAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DenyAllPasswordIdentityProvider provides no identities for users +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DenyAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type HTPasswdPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // file is a reference to your htpasswd file + File string `json:"file"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type LDAPPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // ldap://host:port/basedn?attribute?scope?filter + URL string `json:"url"` + // bindDN is an optional DN to bind with during the search phase. + BindDN string `json:"bindDN"` + // bindPassword is an optional password to bind with during the search phase. + BindPassword configv1.StringSource `json:"bindPassword"` + + // insecure, if true, indicates the connection should not use TLS. + // Cannot be set to true with a URL scheme of "ldaps://". + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // attributes maps LDAP attributes to identities + Attributes LDAPAttributeMapping `json:"attributes"` +} + +// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields +type LDAPAttributeMapping struct { + // id is the list of attributes whose values should be used as the user ID. Required. + // LDAP standard identity attribute is "dn" + ID []string `json:"id"` + // preferredUsername is the list of attributes whose values should be used as the preferred username. + // LDAP standard login attribute is "uid" + PreferredUsername []string `json:"preferredUsername"` + // name is the list of attributes whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + Name []string `json:"name"` + // email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type KeystonePasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // RemoteConnectionInfo contains information about how to connect to the keystone server + configv1.RemoteConnectionInfo `json:",inline"` + // domainName is required for keystone v3 + DomainName string `json:"domainName"` + // useKeystoneIdentity flag indicates that the user should be authenticated by keystone ID, not by username + UseKeystoneIdentity bool `json:"useKeystoneIdentity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type RequestHeaderIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // loginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + LoginURL string `json:"loginURL"` + + // challengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + ChallengeURL string `json:"challengeURL"` + + // clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + ClientCA string `json:"clientCA"` + // clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.
+ ClientCommonNames []string `json:"clientCommonNames"` + + // headers is the set of headers to check for identity information + Headers []string `json:"headers"` + // preferredUsernameHeaders is the set of headers to check for the preferred username + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` + // nameHeaders is the set of headers to check for the display name + NameHeaders []string `json:"nameHeaders"` + // emailHeaders is the set of headers to check for the email address + EmailHeaders []string `json:"emailHeaders"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GitHubIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + // organizations optionally restricts which organizations are allowed to log in + Organizations []string `json:"organizations"` + // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>. + Teams []string `json:"teams"` + // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. + Hostname string `json:"hostname"` + // ca is the optional trusted certificate authority bundle to use when making requests to the server. + // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. + CA string `json:"ca"` +}
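The hostname/ca pairing above is the GitHub Enterprise knob: ca may only be set when hostname is. A sketch with hypothetical values (configv1.StringSource wraps an inline value here; it can also reference env vars or files):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	osinv1 "github.com/openshift/api/osin/v1"
)

func main() {
	gh := osinv1.GitHubIdentityProvider{
		ClientID: "my-client-id", // hypothetical OAuth app credentials
		ClientSecret: configv1.StringSource{
			StringSourceSpec: configv1.StringSourceSpec{Value: "my-client-secret"},
		},
		Organizations: []string{"my-org"},
		Hostname:      "github.mycompany.com", // GitHub Enterprise host
		CA:            "/etc/github/ca.crt",   // allowed only because hostname is set
	}
	fmt.Println(gh.Hostname)
}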
+// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GitLabIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // url is the oauth server base URL + URL string `json:"url"` + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + // legacy determines if OAuth2 or OIDC should be used + // If true, OAuth2 is used + // If false, OIDC is used + // If nil and the URL's host is gitlab.com, OIDC is used + // Otherwise, OAuth2 is used + // In a future release, nil will default to using OIDC + // Eventually this flag will be removed and only OIDC will be used + Legacy *bool `json:"legacy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GoogleIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + HostedDomain string `json:"hostedDomain"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type OpenIDIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + ExtraScopes []string `json:"extraScopes"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` + + // urls to use to authenticate + URLs OpenIDURLs `json:"urls"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider +type OpenIDURLs struct { + // authorize is the oauth authorization URL + Authorize string `json:"authorize"` + // token is the oauth token granting URL + Token string `json:"token"` + // userInfo is the optional userinfo URL. 
+ // If present, a granted access_token is used to request claims + // If empty, a granted id_token is parsed for claims + UserInfo string `json:"userInfo"` +} + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // id is the list of claims whose values should be used as the user ID. Required. + // OpenID standard identity claim is "sub" + ID []string `json:"id"` + // preferredUsername is the list of claims whose values should be used as the preferred username. + // If unspecified, the preferred username is determined from the value of the id claim + PreferredUsername []string `json:"preferredUsername"` + // name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + Name []string `json:"name"` + // email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` + // groups is the list of claims value of which should be used to synchronize groups + // from the OIDC provider to OpenShift for the user + Groups []string `json:"groups"` +} + +// GrantConfig holds the necessary configuration options for grant handlers +type GrantConfig struct { + // method determines the default strategy to use when an OAuth client requests a grant. + // This method will be used only if the specific OAuth client doesn't provide a strategy + // of their own. Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + // - deny: always denies grant requests, useful for black-listed clients + Method GrantHandlerType `json:"method"` + + // serviceAccountMethod is used for determining client authorization for service account oauth client. + // It must be either: deny, prompt + ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` +} + +type GrantHandlerType string + +const ( + // auto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // prompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // deny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession +type SessionConfig struct { + // sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // If no file is specified, a random signing and encryption key are generated at each server start + SessionSecretsFile string `json:"sessionSecretsFile"` + // sessionMaxAgeSeconds specifies how long created sessions last. 
Used by AuthRequestHandlerSession
+	SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"`
+	// sessionName is the cookie name used to store the session
+	SessionName string `json:"sessionName"`
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+	// authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens
+	AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds,omitempty"`
+	// accessTokenMaxAgeSeconds defines the maximum age of access tokens
+	AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+	// accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
+	// +optional
+	AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+	// accessTokenInactivityTimeout defines the token inactivity timeout
+	// for tokens granted by any client.
+	// The value represents the maximum amount of time that can occur between
+	// consecutive uses of the token. Tokens become invalid if they are not
+	// used within this temporal window. The user will need to acquire a new
+	// token to regain access once a token times out. Takes valid time
+	// duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
+	// value for duration is 300s (5 minutes). If the timeout is configured
+	// per client, then that value takes precedence. If the timeout value is
+	// not specified and the client does not override the value, then tokens
+	// are valid until their lifetime.
+	// +optional
+	AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SessionSecrets lists the secrets to use to sign/encrypt and authenticate/decrypt created sessions.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type SessionSecrets struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// secrets is a list of secrets
+	// New sessions are signed and encrypted using the first secret.
+	// Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.
+	Secrets []SessionSecret `json:"secrets"`
+}
+
+// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions
+type SessionSecret struct {
+	// authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.
+	Authentication string `json:"authentication"`
+	// encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.
+	Encryption string `json:"encryption"`
+}
diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000..cb90b8365df99
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go
@@ -0,0 +1,645 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
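+//
+// Example (an illustrative sketch, not part of the generated file; the type
+// and values are arbitrary): the generated helpers in this file return copies
+// that share no slices, maps, or pointers with the receiver, e.g.
+//
+//	orig := &GitHubIdentityProvider{Organizations: []string{"my-org"}}
+//	clone := orig.DeepCopy()
+//	clone.Organizations[0] = "other-org" // orig.Organizations is unchanged
+//	obj := orig.DeepCopyObject()         // the same copy, typed as a runtime.Object
+//	_ = obj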
+func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider. +func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(AllowAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider. +func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider. +func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(DenyAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Legacy != nil { + in, out := &in.Legacy, &out.Legacy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantConfig) DeepCopyInto(out *GrantConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig. +func (in *GrantConfig) DeepCopy() *GrantConfig { + if in == nil { + return nil + } + out := new(GrantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider. +func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
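+//
+// Illustrative note (an editorial sketch, not generated output): the function
+// below first performs a shallow copy (*out = *in) and then deep-copies the
+// nested Provider field, because Provider is a runtime.RawExtension whose Raw
+// bytes and Object would otherwise be shared between the two structs:
+//
+//	var a, b IdentityProvider
+//	a.Provider.Raw = []byte(`{"kind":"AllowAllPasswordIdentityProvider"}`)
+//	a.DeepCopyInto(&b) // b.Provider.Raw is an independent copy of the bytes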
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.Provider.DeepCopyInto(&out.Provider) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider. +func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider { + if in == nil { + return nil + } + out := new(KeystonePasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider. +func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) { + *out = *in + if in.MasterCA != nil { + in, out := &in.MasterCA, &out.MasterCA + *out = new(string) + **out = **in + } + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.GrantConfig = in.GrantConfig + if in.SessionConfig != nil { + in, out := &in.SessionConfig, &out.SessionConfig + *out = new(SessionConfig) + **out = **in + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = new(OAuthTemplates) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig. +func (in *OAuthConfig) DeepCopy() *OAuthConfig { + if in == nil { + return nil + } + out := new(OAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.URLs = in.URLs + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. 
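+//
+// Example (illustrative sketch, not generated output): DeepCopyInto above
+// duplicates the ExtraAuthorizeParameters map entry by entry, so mutating the
+// copy leaves the original untouched:
+//
+//	a := &OpenIDIdentityProvider{ExtraAuthorizeParameters: map[string]string{"prompt": "login"}}
+//	b := a.DeepCopy()
+//	b.ExtraAuthorizeParameters["prompt"] = "consent" // a still maps "prompt" to "login"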
+func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs. +func (in *OpenIDURLs) DeepCopy() *OpenIDURLs { + if in == nil { + return nil + } + out := new(OpenIDURLs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsinServerConfig) DeepCopyInto(out *OsinServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.OAuthConfig.DeepCopyInto(&out.OAuthConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsinServerConfig. +func (in *OsinServerConfig) DeepCopy() *OsinServerConfig { + if in == nil { + return nil + } + out := new(OsinServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OsinServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SessionConfig) DeepCopyInto(out *SessionConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig. +func (in *SessionConfig) DeepCopy() *SessionConfig { + if in == nil { + return nil + } + out := new(SessionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecret) DeepCopyInto(out *SessionSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret. +func (in *SessionSecret) DeepCopy() *SessionSecret { + if in == nil { + return nil + } + out := new(SessionSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SessionSecret, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets. +func (in *SessionSecrets) DeepCopy() *SessionSecrets { + if in == nil { + return nil + } + out := new(SessionSecrets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SessionSecrets) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AccessTokenInactivityTimeout != nil { + in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..890928a7a4dc6 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,280 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
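+//
+// Example (illustrative sketch, not generated content): each SwaggerDoc method
+// in this file returns a map keyed by JSON field name, with the empty key
+// holding the type-level description:
+//
+//	docs := GitHubIdentityProvider{}.SwaggerDoc()
+//	typeDoc := docs[""]          // description of the whole type
+//	fieldDoc := docs["clientID"] // description of the clientID field
+//	_, _ = typeDoc, fieldDoc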
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AllowAllPasswordIdentityProvider = map[string]string{
+	"": "AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (AllowAllPasswordIdentityProvider) SwaggerDoc() map[string]string {
+	return map_AllowAllPasswordIdentityProvider
+}
+
+var map_BasicAuthPasswordIdentityProvider = map[string]string{
+	"": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string {
+	return map_BasicAuthPasswordIdentityProvider
+}
+
+var map_DenyAllPasswordIdentityProvider = map[string]string{
+	"": "DenyAllPasswordIdentityProvider provides no identities for users\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string {
+	return map_DenyAllPasswordIdentityProvider
+}
+
+var map_GitHubIdentityProvider = map[string]string{
+	"":              "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"clientID":      "clientID is the oauth client ID",
+	"clientSecret":  "clientSecret is the oauth client secret",
+	"organizations": "organizations optionally restricts which organizations are allowed to log in",
+	"teams":         "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.",
+	"hostname":      "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.",
+	"ca":            "ca is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.",
+}
+
+func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
+	return map_GitHubIdentityProvider
+}
+
+var map_GitLabIdentityProvider = map[string]string{
+	"": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason.
These capabilities should not be used by applications needing long term support.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "url": "url is the oauth server base URL", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "legacy": "legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_GrantConfig = map[string]string{ + "": "GrantConfig holds the necessary configuration options for grant handlers", + "method": "method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "serviceAccountMethod": "serviceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", +} + +func (GrantConfig) SwaggerDoc() map[string]string { + return map_GrantConfig +} + +var map_HTPasswdPasswordIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "file": "file is a reference to your htpasswd file", +} + +func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdPasswordIdentityProvider +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "name is used to qualify the identities returned by this provider", + "challenge": "challenge indicates whether to issue WWW-Authenticate challenges for this provider", + "login": "login indicates whether to use this identity provider for unauthenticated browsers to login against", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users", + "provider": "provider contains the information about how to set up a specific identity provider", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_KeystonePasswordIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "domainName": "domainName is required for keystone v3", + "useKeystoneIdentity": "useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", +} + +func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystonePasswordIdentityProvider +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPPasswordIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional password to bind with during the search phase.", + "insecure": "insecure, if true, indicates the connection should not use TLS. 
Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830",
+	"ca":         "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
+	"attributes": "attributes maps LDAP attributes to identities",
+}
+
+func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string {
+	return map_LDAPPasswordIdentityProvider
+}
+
+var map_OAuthConfig = map[string]string{
+	"":                            "OAuthConfig holds the necessary configuration options for OAuth authentication",
+	"masterCA":                    "masterCA is the CA for verifying the TLS connection back to the MasterURL. This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated",
+	"masterURL":                   "masterURL is used for making server-to-server calls to exchange authorization codes for access tokens This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated",
+	"masterPublicURL":             "masterPublicURL is used for building valid client redirect URLs for internal and external access This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated",
+	"loginURL":                    "loginURL, along with masterCA, masterURL and masterPublicURL have distinct meanings depending on how the OAuth server is run. The two states are: 1. embedded in the kube api server (all 3.x releases) 2. as a standalone external process (all 4.x releases) in the embedded configuration, loginURL is equivalent to masterPublicURL and the other fields have functionality that matches their docs. in the standalone configuration, the fields are used as: loginURL is the URL required to login to the cluster: oc login --server=<loginURL> masterPublicURL is the issuer URL it is accessible from inside (service network) and outside (ingress) of the cluster masterURL is the loopback variation of the token_endpoint URL with no path component it is only accessible from inside (service network) of the cluster masterCA is used to perform TLS verification for connections made to masterURL For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2",
+	"assetPublicURL":              "assetPublicURL is used for building valid client redirect URLs for external access",
+	"alwaysShowProviderSelection": "alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.",
+	"identityProviders":           "identityProviders is an ordered list of ways for a user to identify themselves",
+	"grantConfig":                 "grantConfig describes how to handle grants",
+	"sessionConfig":               "sessionConfig holds information about configuring sessions.",
+	"tokenConfig":                 "tokenConfig contains options for authorization and access tokens",
+	"templates":                   "templates allow you to customize pages like the login page.",
+}
+
+func (OAuthConfig) SwaggerDoc() map[string]string {
+	return map_OAuthConfig
+}
+
+var map_OAuthTemplates = map[string]string{
+	"":                  "OAuthTemplates allow for customization of pages like the login page",
+	"login":             "login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.",
+	"providerSelection": "providerSelection is a path to a file containing a go template used to render the provider selection page.
If unspecified, the default provider selection page is used.", + "error": "error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "id": "id is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "urls": "urls to use to authenticate", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_OpenIDURLs = map[string]string{ + "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider", + "authorize": "authorize is the oauth authorization URL", + "token": "token is the oauth token granting URL", + "userInfo": "userInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", +} + +func (OpenIDURLs) SwaggerDoc() map[string]string { + return map_OpenIDURLs +} + +var map_OsinServerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "oauthConfig": "oauthConfig holds the necessary configuration options for OAuth authentication", +} + +func (OsinServerConfig) SwaggerDoc() map[string]string { + return map_OsinServerConfig +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "clientCA": "clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_SessionConfig = map[string]string{ + "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession", + "sessionSecretsFile": "sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", + "sessionMaxAgeSeconds": "sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "sessionName": "sessionName is the cookie name used to store the session", +} + +func (SessionConfig) SwaggerDoc() map[string]string { + return map_SessionConfig +} + +var map_SessionSecret = map[string]string{ + "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", + "authentication": "authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.",
+}
+
+func (SessionSecret) SwaggerDoc() map[string]string {
+	return map_SessionSecret
+}
+
+var map_SessionSecrets = map[string]string{
+	"":        "SessionSecrets lists the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"secrets": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.",
+}
+
+func (SessionSecrets) SwaggerDoc() map[string]string {
+	return map_SessionSecrets
+}
+
+var map_TokenConfig = map[string]string{
+	"":                                    "TokenConfig holds the necessary configuration options for authorization and access tokens",
+	"authorizeTokenMaxAgeSeconds":         "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens",
+	"accessTokenMaxAgeSeconds":            "accessTokenMaxAgeSeconds defines the maximum age of access tokens",
+	"accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.",
+	"accessTokenInactivityTimeout":        "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.",
+}
+
+func (TokenConfig) SwaggerDoc() map[string]string {
+	return map_TokenConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/pkg/serialization/serialization.go b/vendor/github.com/openshift/api/pkg/serialization/serialization.go
new file mode 100644
index 0000000000000..70c8e7a9943b5
--- /dev/null
+++ b/vendor/github.com/openshift/api/pkg/serialization/serialization.go
@@ -0,0 +1,45 @@
+package serialization
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// DecodeNestedRawExtensionOrUnknown decodes ext.Raw into ext.Object when the
+// decoder recognizes the type; otherwise it stores a runtime.Unknown carrying
+// the raw bytes.
+func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) {
+	if ext.Raw == nil || ext.Object != nil {
+		return
+	}
+	obj, gvk, err := d.Decode(ext.Raw, nil, nil)
+	if err != nil {
+		unk := &runtime.Unknown{Raw: ext.Raw}
+		if runtime.IsNotRegisteredError(err) {
+			if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil {
+				unk.APIVersion = gvk.GroupVersion().String()
+				unk.Kind = gvk.Kind
+				ext.Object = unk
+				return
+			}
+		}
+		// TODO: record mime-type with the object
+		if gvk != nil {
+			unk.APIVersion = gvk.GroupVersion().String()
+			unk.Kind = gvk.Kind
+		}
+		obj = unk
+	}
+	ext.Object = obj
+}
+
+// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or
+// return an error.
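+//
+// Illustrative round trip (a sketch only; "codec" stands for any value that
+// implements both runtime.Decoder and runtime.Encoder, e.g. one obtained from
+// a scheme's codec factory, and rawJSON is placeholder input):
+//
+//	ext := &runtime.RawExtension{Raw: rawJSON}
+//	DecodeNestedRawExtensionOrUnknown(codec, ext) // fills ext.Object, or a runtime.Unknown
+//	ext.Raw = nil                                 // force re-encoding from ext.Object
+//	err := EncodeNestedRawExtension(codec, ext)   // repopulates ext.Raw
+//	_ = err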
+func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { + if ext.Raw != nil || ext.Object == nil { + return nil + } + data, err := runtime.Encode(e, ext.Object) + if err != nil { + return err + } + ext.Raw = data + return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/doc.go b/vendor/github.com/openshift/api/project/v1/doc.go new file mode 100644 index 0000000000000..5bbd9d5ea7a25 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=project.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go new file mode 100644 index 0000000000000..822dbbc301320 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -0,0 +1,1305 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/project/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{0} +} +func (m *Project) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Project) XXX_Merge(src proto.Message) { + xxx_messageInfo_Project.Merge(m, src) +} +func (m *Project) XXX_Size() int { + return m.Size() +} +func (m *Project) XXX_DiscardUnknown() { + xxx_messageInfo_Project.DiscardUnknown(m) +} + +var xxx_messageInfo_Project proto.InternalMessageInfo + +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{1} +} +func (m *ProjectList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectList.Merge(m, src) +} +func (m *ProjectList) XXX_Size() int { + return m.Size() +} +func (m *ProjectList) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectList.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectList proto.InternalMessageInfo + +func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } +func (*ProjectRequest) ProtoMessage() {} +func (*ProjectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{2} +} +func (m *ProjectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectRequest.Merge(m, src) +} +func (m *ProjectRequest) XXX_Size() int { + return m.Size() +} +func (m *ProjectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo + +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{3} +} +func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectSpec.Merge(m, src) +} +func (m *ProjectSpec) XXX_Size() int { + return m.Size() +} +func (m *ProjectSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo + +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{4} +} +func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectStatus.Merge(m, src) +} +func (m *ProjectStatus) XXX_Size() int { + return m.Size() +} +func (m *ProjectStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project") + proto.RegisterType((*ProjectList)(nil), "github.com.openshift.api.project.v1.ProjectList") + proto.RegisterType((*ProjectRequest)(nil), "github.com.openshift.api.project.v1.ProjectRequest") + proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec") + proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf) +} + +var fileDescriptor_fbf46eaac05029bf = []byte{ + // 573 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xd3, 0x30, + 0x18, 0xc7, 0x9b, 0x6d, 0x1d, 0xab, 0xcb, 0x26, 0x14, 0x2e, 0x55, 0x0f, 0x69, 0xc9, 0x24, 0xd4, + 0x03, 0x38, 0xb4, 0xbc, 0x88, 0x73, 0x40, 0x88, 0x49, 0xbc, 0x0c, 0x73, 0xab, 0x38, 0xe0, 0xa6, + 0x6e, 0x6a, 0xba, 0xc4, 0x26, 0x76, 0x2b, 0x8d, 0x13, 0x1f, 0x81, 0x3b, 0x9f, 0x83, 0x2b, 0xe7, + 0x1e, 0x77, 0xdc, 0xa9, 0x5a, 0xc3, 0xb7, 0xd8, 0x09, 0xd9, 0x71, 0x93, 0xc0, 0x8a, 0xd4, 0x5d, + 0xb8, 0xd5, 0x4f, 0xfe, 0xbf, 0x9f, 0xed, 0xe7, 0x49, 0x03, 0x1e, 0x86, 0x54, 0x8e, 0xa7, 0x03, + 0x18, 0xb0, 0xc8, 0x63, 0x9c, 0xc4, 0x62, 0x4c, 0x47, 0xd2, 0xc3, 0x9c, 0x7a, 0x3c, 0x61, 0x9f, + 0x48, 0x20, 0xbd, 0x59, 0xd7, 0x0b, 0x49, 0x4c, 0x12, 0x2c, 0xc9, 0x10, 0xf2, 0x84, 0x49, 0x66, + 0x1f, 0x16, 0x10, 0xcc, 0x21, 0x88, 0x39, 0x85, 0x06, 0x82, 0xb3, 0x6e, 0xf3, 0x7e, 0xc9, 0x1c, + 0xb2, 0x90, 0x79, 0x9a, 0x1d, 0x4c, 0x47, 0x7a, 0xa5, 0x17, 0xfa, 0x57, 0xe6, 0x6c, 0xba, 0x93, + 0xa7, 0x02, 0x52, 0xa6, 0xb7, 0x0e, 0x58, 0x42, 0xd6, 0xec, 0xdb, 0x7c, 0x54, 0x64, 0x22, 0x1c, + 0x8c, 0x69, 0x4c, 0x92, 0x53, 0x8f, 0x4f, 0x42, 0x55, 0x10, 0x5e, 0x44, 0x24, 0x5e, 0x47, 0x3d, + 0xf9, 0x17, 0x95, 0x4c, 0x63, 0x49, 0x23, 0xe2, 0x89, 0x60, 0x4c, 0x22, 0xfc, 0x37, 0xe7, 0x7e, + 0xdf, 0x02, 0x37, 0x8e, 0xb3, 0xfb, 0xd8, 0x1f, 0xc1, 0x9e, 0xd2, 0x0f, 0xb1, 0xc4, 0x0d, 0xab, + 0x6d, 0x75, 0xea, 0xbd, 0x07, 0x30, 0xd3, 0xc2, 0xb2, 0x16, 0xf2, 0x49, 0xa8, 0x0a, 0x02, 0xaa, + 0x34, 0x9c, 0x75, 0xe1, 0xdb, 0x81, 0xe2, 0x5f, 0x13, 0x89, 0x7d, 0x7b, 0xbe, 0x68, 0x55, 0xd2, + 0x45, 0x0b, 0x14, 0x35, 0x94, 0x5b, 0x6d, 0x04, 0x76, 0x04, 0x27, 0x41, 0x63, 0xcb, 0xd8, 0x37, + 0x68, 0x31, 0x34, 0xa7, 0x7b, 0xcf, 0x49, 0xe0, 0xdf, 0x34, 0xf6, 0x1d, 0xb5, 0x42, 0xda, 0x65, + 0xf7, 0xc1, 0xae, 0x90, 0x58, 0x4e, 0x45, 0x63, 0x5b, 0x5b, 0x7b, 0xd7, 0xb2, 0x6a, 0xd2, 0x3f, + 0x30, 0xde, 0xdd, 0x6c, 0x8d, 0x8c, 0xd1, 0xfd, 0x69, 0x81, 0xba, 0x49, 0xbe, 0xa2, 0x42, 0xda, + 0x1f, 0xae, 0x74, 0x08, 0x6e, 0xd6, 0x21, 0x45, 0xeb, 0xfe, 0xdc, 0x32, 0x3b, 0xed, 0xad, 0x2a, + 0xa5, 0xee, 0xbc, 0x03, 0x55, 0x2a, 0x49, 0x24, 0x1a, 0x5b, 0xed, 0xed, 0x4e, 0xbd, 0x77, 0xef, + 0x3a, 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0x7a, 0xa4, 0x14, 0x28, 0x33, 0xb9, 0x17, 0x16, 0x38, 0x30, + 0x09, 0x44, 0x3e, 0x4f, 
0x89, 0xf8, 0x1f, 0x53, 0x7e, 0x0c, 0xea, 0x43, 0x2a, 0xf8, 0x09, 0x3e, + 0x7d, 0x83, 0x23, 0xa2, 0x87, 0x5d, 0xf3, 0x6f, 0x1b, 0xa4, 0xfe, 0xbc, 0x78, 0x84, 0xca, 0x39, + 0x8d, 0x11, 0x11, 0x24, 0x94, 0x4b, 0xca, 0x62, 0x3d, 0xcd, 0x32, 0x56, 0x3c, 0x42, 0xe5, 0x9c, + 0x8b, 0xf3, 0x11, 0xa9, 0x97, 0xc2, 0x46, 0x00, 0x8c, 0x68, 0x8c, 0x4f, 0xe8, 0x17, 0x92, 0x88, + 0x86, 0xd5, 0xde, 0xee, 0xd4, 0xfc, 0x9e, 0x3a, 0xea, 0x8b, 0xbc, 0x7a, 0xb9, 0x68, 0xb5, 0xaf, + 0xfe, 0x11, 0x61, 0x1e, 0xd0, 0x47, 0x2b, 0x59, 0xdc, 0x1f, 0x16, 0xd8, 0xff, 0xe3, 0x85, 0xb1, + 0x5f, 0x82, 0x2a, 0x1f, 0x63, 0x41, 0x74, 0x07, 0x6b, 0x7e, 0x6f, 0xd5, 0xfc, 0x63, 0x55, 0xbc, + 0x5c, 0xb4, 0xee, 0xac, 0xf1, 0x2b, 0xad, 0xe0, 0x38, 0x20, 0x3a, 0x84, 0x32, 0x81, 0xdd, 0x07, + 0x20, 0x60, 0xf1, 0x90, 0xaa, 0xbb, 0xac, 0x26, 0x7f, 0xb7, 0x34, 0x10, 0xa8, 0x70, 0x58, 0xc6, + 0x9f, 0xad, 0xe2, 0xc5, 0x18, 0xf2, 0x92, 0x40, 0x25, 0x9b, 0x7f, 0x34, 0x5f, 0x3a, 0x95, 0xb3, + 0xa5, 0x53, 0x39, 0x5f, 0x3a, 0x95, 0xaf, 0xa9, 0x63, 0xcd, 0x53, 0xc7, 0x3a, 0x4b, 0x1d, 0xeb, + 0x3c, 0x75, 0xac, 0x8b, 0xd4, 0xb1, 0xbe, 0xfd, 0x72, 0x2a, 0xfd, 0xc3, 0x0d, 0xbe, 0x8e, 0xbf, + 0x03, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x9b, 0x1f, 0xba, 0x43, 0x05, 0x00, 0x00, +} + +func (m *Project) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Project) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Project) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectSpec) Size() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Project) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Project{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Project{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ProjectList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ProjectRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ProjectStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Project) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Project: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Project{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, k8s_io_api_core_v1.FinalizerName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v11.NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto new file mode 100644 index 0000000000000..d1ffbc341bfb1 --- 
/dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -0,0 +1,90 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.project.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/project/v1"; + +// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, +// a quota on the resources that the project may consume, and the security controls on the resources in +// the project. Within a project, members may have different roles - project administrators can set +// membership, editors can create and manage the resources, and viewers can see but not access running +// containers. In a normal cluster project administrators are not able to alter their quotas - that is +// restricted to cluster administrators. +// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Project { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the behavior of the Namespace. + optional ProjectSpec spec = 2; + + // status describes the current status of a Namespace + // +optional + optional ProjectStatus status = 3; +} + +// ProjectList is a list of Project objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ProjectList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of projects + repeated Project items = 2; +} + +// ProjectRequest is the set of options necessary to fully qualify a project request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ProjectRequest { + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // displayName is the display name to apply to a project + optional string displayName = 2; + + // description is the description to apply to a project + optional string description = 3; +} + +// ProjectSpec describes the attributes on a Project +message ProjectSpec { + // finalizers is an opaque list of values that must be empty to permanently remove object from storage + repeated string finalizers = 1; +} + +// ProjectStatus is information about the current status of a Project +message ProjectStatus { + // phase is the current lifecycle phase of the project + // +optional + optional string phase = 1; + + // Represents the latest available observations of the project current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated .k8s.io.api.core.v1.NamespaceCondition conditions = 2; +} + diff --git a/vendor/github.com/openshift/api/project/v1/legacy.go b/vendor/github.com/openshift/api/project/v1/legacy.go new file mode 100644 index 0000000000000..186f905f3a8eb --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/legacy.go @@ -0,0 +1,23 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go new file mode 100644 index 0000000000000..e471716ce8e2a --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/register.go @@ -0,0 +1,40 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "project.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
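A hedged usage sketch (editorial, not vendored) of how register.go is conventionally consumed by a caller that builds its own runtime.Scheme. Install is the supported entry point; per the comments above, SchemeGroupVersion and AddToScheme remain only for older generated code.

import (
	projectv1 "github.com/openshift/api/project/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func newProjectScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	// Install registers Project, ProjectList, and ProjectRequest under
	// project.openshift.io/v1 (and pulls in core v1 via the scheme builder).
	utilruntime.Must(projectv1.Install(scheme))
	return scheme
}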
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go new file mode 100644 index 0000000000000..5e69b775b53f3 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -0,0 +1,111 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectList is a list of Project objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of projects + Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +const ( + // These are internal finalizer values to Origin + FinalizerOrigin corev1.FinalizerName = "openshift.io/origin" + // ProjectNodeSelector is an annotation that holds the node selector; + // the node selector annotation determines which nodes will have pods from this project scheduled to them + ProjectNodeSelector = "openshift.io/node-selector" + + // ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present, + // but it is set by the default project template. + ProjectRequesterAnnotation = "openshift.io/requester" +) + +// ProjectSpec describes the attributes on a Project +type ProjectSpec struct { + // finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"` +} + +// ProjectStatus is information about the current status of a Project +type ProjectStatus struct { + // phase is the current lifecycle phase of the project + // +optional + Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` + + // Represents the latest available observations of the project current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, +// a quota on the resources that the project may consume, and the security controls on the resources in +// the project. Within a project, members may have different roles - project administrators can set +// membership, editors can create and manage the resources, and viewers can see but not access running +// containers. In a normal cluster project administrators are not able to alter their quotas - that is +// restricted to cluster administrators. 
+// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Project struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the behavior of the Namespace. + Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status describes the current status of a Namespace + // +optional + Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=Project + +// ProjectRequest is the set of options necessary to fully qualify a project request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectRequest struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // displayName is the display name to apply to a project + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + // description is the description to apply to a project + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` +} diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..ddbdda971dc25 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -0,0 +1,142 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
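Editorial sketch of the usual reason to reach for these generated DeepCopy helpers, assuming a Project obtained from a shared informer cache, which must never be mutated in place.

import (
	projectv1 "github.com/openshift/api/project/v1"
)

// withFinalizer (hypothetical) returns a mutated copy and leaves p untouched:
// DeepCopy clones ObjectMeta, Spec, and Status, including the Finalizers slice.
func withFinalizer(p *projectv1.Project) *projectv1.Project {
	out := p.DeepCopy()
	out.Spec.Finalizers = append(out.Spec.Finalizers, projectv1.FinalizerOrigin)
	return out
}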
+func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest. +func (in *ProjectRequest) DeepCopy() *ProjectRequest { + if in == nil { + return nil + } + out := new(ProjectRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]corev1.FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]corev1.NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. 
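A side note on the two slice-copy shapes the generator emits in this file: Finalizers gets a flat copy because its element type is a string alias, while Conditions is cloned element by element because NamespaceCondition carries its own DeepCopyInto. A minimal sketch of the distinction, using hypothetical types:

// Flat copy: sufficient when elements are immutable values (strings, numbers).
func copyValueSlice(in []string) []string {
	out := make([]string, len(in))
	copy(out, in)
	return out
}

type node struct{ children []string }

func (in *node) deepCopyInto(out *node) {
	*out = *in
	if in.children != nil {
		out.children = make([]string, len(in.children))
		copy(out.children, in.children)
	}
}

// Element-wise copy: required when elements themselves hold references.
func copyStructSlice(in []node) []node {
	out := make([]node, len(in))
	for i := range in {
		in[i].deepCopyInto(&out[i])
	}
	return out
}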
+func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..b764eaface177 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,65 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Project = map[string]string{ + "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the behavior of the Namespace.", + "status": "status describes the current status of a Namespace", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "ProjectList is a list of Project objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of projects", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectRequest = map[string]string{ + "": "ProjectRequest is the set of options necessary to fully qualify a project request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "displayName": "displayName is the display name to apply to a project", + "description": "description is the description to apply to a project", +} + +func (ProjectRequest) SwaggerDoc() map[string]string { + return map_ProjectRequest +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec describes the attributes on a Project", + "finalizers": "finalizers is an opaque list of values that must be empty to permanently remove object from storage", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_ProjectStatus = map[string]string{ + "": "ProjectStatus is information about the current status of a Project", + "phase": "phase is the current lifecycle phase of the project", + "conditions": "Represents the latest available observations of the project current state.", +} + +func (ProjectStatus) SwaggerDoc() map[string]string { + return map_ProjectStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/quota/v1/Makefile b/vendor/github.com/openshift/api/quota/v1/Makefile new file mode 100644 index 0000000000000..691859dd828ce --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="quota.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/quota/v1/doc.go b/vendor/github.com/openshift/api/quota/v1/doc.go new file mode 100644 index 0000000000000..ae5c9c2c762ae --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/quota/apis/quota +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=quota.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/quota/v1/generated.pb.go b/vendor/github.com/openshift/api/quota/v1/generated.pb.go new file mode 100644 index 0000000000000..7556462cffa37 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/generated.pb.go @@ -0,0 +1,2152 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/quota/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
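Illustrative sketch, not from the vendored sources: how the SwaggerDoc maps in zz_generated.swagger_doc_generated.go are consumed, assuming the project/v1 package import. Each map keys field documentation by JSON field name, with the "" key holding the type-level description.

import (
	"fmt"

	projectv1 "github.com/openshift/api/project/v1"
)

func printProjectRequestDocs() {
	docs := projectv1.ProjectRequest{}.SwaggerDoc()
	fmt.Println(docs[""])            // type-level description
	fmt.Println(docs["displayName"]) // per-field documentation
}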
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AppliedClusterResourceQuota) Reset() { *m = AppliedClusterResourceQuota{} } +func (*AppliedClusterResourceQuota) ProtoMessage() {} +func (*AppliedClusterResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{0} +} +func (m *AppliedClusterResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppliedClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppliedClusterResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppliedClusterResourceQuota.Merge(m, src) +} +func (m *AppliedClusterResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *AppliedClusterResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_AppliedClusterResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_AppliedClusterResourceQuota proto.InternalMessageInfo + +func (m *AppliedClusterResourceQuotaList) Reset() { *m = AppliedClusterResourceQuotaList{} } +func (*AppliedClusterResourceQuotaList) ProtoMessage() {} +func (*AppliedClusterResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{1} +} +func (m *AppliedClusterResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppliedClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppliedClusterResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppliedClusterResourceQuotaList.Merge(m, src) +} +func (m *AppliedClusterResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *AppliedClusterResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_AppliedClusterResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_AppliedClusterResourceQuotaList proto.InternalMessageInfo + +func (m *ClusterResourceQuota) Reset() { *m = ClusterResourceQuota{} } +func (*ClusterResourceQuota) ProtoMessage() {} +func (*ClusterResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{2} +} +func (m *ClusterResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuota.Merge(m, src) +} +func (m *ClusterResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuota proto.InternalMessageInfo + +func (m *ClusterResourceQuotaList) Reset() { *m = ClusterResourceQuotaList{} } +func (*ClusterResourceQuotaList) ProtoMessage() {} +func (*ClusterResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{3} +} +func (m *ClusterResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaList.Merge(m, src) +} +func (m *ClusterResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaList proto.InternalMessageInfo + +func (m *ClusterResourceQuotaSelector) Reset() { *m = ClusterResourceQuotaSelector{} } +func (*ClusterResourceQuotaSelector) ProtoMessage() {} +func (*ClusterResourceQuotaSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{4} +} +func (m *ClusterResourceQuotaSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaSelector.Merge(m, src) +} +func (m *ClusterResourceQuotaSelector) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaSelector proto.InternalMessageInfo + +func (m *ClusterResourceQuotaSpec) Reset() { *m = ClusterResourceQuotaSpec{} } +func (*ClusterResourceQuotaSpec) ProtoMessage() {} +func (*ClusterResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{5} +} +func (m *ClusterResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaSpec.Merge(m, src) +} +func (m *ClusterResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaSpec proto.InternalMessageInfo + +func (m *ClusterResourceQuotaStatus) Reset() { *m = ClusterResourceQuotaStatus{} } +func (*ClusterResourceQuotaStatus) ProtoMessage() {} +func (*ClusterResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{6} +} +func (m *ClusterResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaStatus.Merge(m, src) +} +func (m *ClusterResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceQuotaStatusByNamespace) Reset() { *m = ResourceQuotaStatusByNamespace{} } +func (*ResourceQuotaStatusByNamespace) ProtoMessage() {} +func 
(*ResourceQuotaStatusByNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{7} +} +func (m *ResourceQuotaStatusByNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatusByNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceQuotaStatusByNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatusByNamespace.Merge(m, src) +} +func (m *ResourceQuotaStatusByNamespace) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatusByNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatusByNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuotaStatusByNamespace proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AppliedClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuota") + proto.RegisterType((*AppliedClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuotaList") + proto.RegisterType((*ClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuota") + proto.RegisterType((*ClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaList") + proto.RegisterType((*ClusterResourceQuotaSelector)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector.AnnotationsEntry") + proto.RegisterType((*ClusterResourceQuotaSpec)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSpec") + proto.RegisterType((*ClusterResourceQuotaStatus)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaStatus") + proto.RegisterType((*ResourceQuotaStatusByNamespace)(nil), "github.com.openshift.api.quota.v1.ResourceQuotaStatusByNamespace") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/quota/v1/generated.proto", fileDescriptor_f605e5b8440aecb8) +} + +var fileDescriptor_f605e5b8440aecb8 = []byte{ + // 716 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x6f, 0xd3, 0x3e, + 0x1c, 0x6d, 0xba, 0x75, 0x5a, 0xbd, 0xff, 0xfe, 0xda, 0xac, 0x1d, 0xaa, 0x82, 0xd2, 0x2d, 0x12, + 0x62, 0x17, 0x1c, 0x3a, 0x10, 0x4c, 0x20, 0x86, 0x16, 0x84, 0x10, 0x68, 0x30, 0x08, 0x9c, 0xd0, + 0x40, 0xb8, 0x99, 0xd7, 0x86, 0x26, 0x71, 0x88, 0x9d, 0x4a, 0xbd, 0xf1, 0x09, 0x10, 0x9f, 0x81, + 0x0f, 0xc2, 0x0d, 0x69, 0x37, 0x76, 0x01, 0xed, 0x34, 0xd1, 0xc0, 0x07, 0x41, 0x76, 0xdc, 0xa4, + 0xdb, 0xda, 0xad, 0x6c, 0x07, 0x2e, 0xdc, 0xe2, 0x5f, 0xfd, 0xde, 0xfb, 0xfd, 0x5e, 0x9e, 0xdd, + 0x80, 0x7a, 0xd3, 0xe5, 0xad, 0xb8, 0x81, 0x1c, 0xea, 0x9b, 0x34, 0x24, 0x01, 0x6b, 0xb9, 0x3b, + 0xdc, 0xc4, 0xa1, 0x6b, 0xbe, 0x8b, 0x29, 0xc7, 0x66, 0xa7, 0x6e, 0x36, 0x49, 0x40, 0x22, 0xcc, + 0xc9, 0x36, 0x0a, 0x23, 0xca, 0x29, 0x5c, 0xca, 0x21, 0x28, 0x83, 0x20, 0x1c, 0xba, 0x48, 0x42, + 0x50, 0xa7, 0x5e, 0xbd, 0x32, 0xc0, 0xda, 0xa4, 0x4d, 0x6a, 0x4a, 0x64, 0x23, 0xde, 0x91, 0x2b, + 0xb9, 0x90, 0x4f, 0x29, 0x63, 0xd5, 0x68, 0xaf, 0x32, 0xe4, 0x52, 0x29, 0xeb, 0xd0, 0x88, 0x0c, + 0x51, 0xad, 0x5e, 0xcf, 0xf7, 0xf8, 0xd8, 0x69, 0xb9, 0x01, 0x89, 0xba, 0x66, 0xd8, 0x6e, 0x8a, + 0x02, 0x33, 0x7d, 0x32, 0xb4, 0xd7, 0xea, 0x8d, 0x51, 0xa8, 0x28, 0x0e, 0xb8, 0xeb, 0x13, 0x93, + 0x39, 0x2d, 0xe2, 0xe3, 0xa3, 0x38, 0xe3, 0x4b, 0x11, 
0x5c, 0x58, 0x0f, 0x43, 0xcf, 0x25, 0xdb, + 0xf7, 0xbc, 0x98, 0x71, 0x12, 0xd9, 0x84, 0xd1, 0x38, 0x72, 0xc8, 0x33, 0x31, 0x23, 0x7c, 0x03, + 0xa6, 0x85, 0xe4, 0x36, 0xe6, 0xb8, 0xa2, 0x2d, 0x6a, 0xcb, 0x33, 0x2b, 0x57, 0x51, 0x2a, 0x85, + 0x06, 0xa5, 0x50, 0xd8, 0x6e, 0x8a, 0x02, 0x43, 0x62, 0x37, 0xea, 0xd4, 0xd1, 0x66, 0xe3, 0x2d, + 0x71, 0xf8, 0x63, 0xc2, 0xb1, 0x05, 0x77, 0x0f, 0x6a, 0x85, 0xe4, 0xa0, 0x06, 0xf2, 0x9a, 0x9d, + 0xb1, 0xc2, 0x57, 0x60, 0x92, 0x85, 0xc4, 0xa9, 0x14, 0x25, 0xfb, 0x6d, 0x74, 0xaa, 0xe9, 0x68, + 0x58, 0xa3, 0xcf, 0x43, 0xe2, 0x58, 0xff, 0x29, 0xa1, 0x49, 0xb1, 0xb2, 0x25, 0x2d, 0x24, 0x60, + 0x8a, 0x71, 0xcc, 0x63, 0x56, 0x99, 0x90, 0x02, 0x77, 0xce, 0x2a, 0x20, 0x49, 0xac, 0xff, 0x95, + 0xc4, 0x54, 0xba, 0xb6, 0x15, 0xb9, 0xf1, 0x4b, 0x03, 0xb5, 0x13, 0x7c, 0xdc, 0x70, 0x19, 0x87, + 0x5b, 0xc7, 0xbc, 0x44, 0xe3, 0x79, 0x29, 0xd0, 0xd2, 0xc9, 0x39, 0xa5, 0x3e, 0xdd, 0xaf, 0x0c, + 0xf8, 0xe8, 0x80, 0x92, 0xcb, 0x89, 0xcf, 0x2a, 0xc5, 0xc5, 0x89, 0xe5, 0x99, 0x95, 0xb5, 0x31, + 0xe6, 0x3c, 0xa1, 0x61, 0x6b, 0x56, 0x49, 0x95, 0x1e, 0x0a, 0x52, 0x3b, 0xe5, 0x36, 0x3e, 0x17, + 0xc1, 0xc2, 0xbf, 0x9c, 0x9c, 0x23, 0x27, 0xdf, 0x35, 0x50, 0xf9, 0x4b, 0x01, 0xd9, 0x3a, 0x1c, + 0x90, 0x9b, 0x67, 0x1c, 0x70, 0x44, 0x32, 0xbe, 0x16, 0xc1, 0xc5, 0xa1, 0x7e, 0x10, 0x8f, 0x38, + 0x9c, 0x46, 0xf0, 0x35, 0x98, 0xf2, 0x70, 0x83, 0x78, 0x4c, 0x8d, 0x76, 0x6d, 0xcc, 0xd1, 0x04, + 0xa6, 0x4f, 0x62, 0xcd, 0x27, 0x07, 0xb5, 0xd9, 0x43, 0x25, 0x5b, 0xb1, 0xc2, 0x0f, 0x1a, 0x98, + 0xc1, 0x41, 0x40, 0x39, 0xe6, 0x2e, 0x0d, 0xfa, 0x53, 0x3e, 0x3d, 0xeb, 0x6b, 0x54, 0xf4, 0x68, + 0x3d, 0xa7, 0xbc, 0x1f, 0xf0, 0xa8, 0x6b, 0x55, 0xd5, 0xf8, 0x30, 0xff, 0x25, 0xeb, 0x65, 0xb0, + 0x81, 0xea, 0x1a, 0x98, 0x3b, 0x0a, 0x86, 0x73, 0x60, 0xa2, 0x4d, 0xba, 0xd2, 0x81, 0xb2, 0x2d, + 0x1e, 0xe1, 0x02, 0x28, 0x75, 0xb0, 0x17, 0x13, 0x99, 0xeb, 0xb2, 0x9d, 0x2e, 0x6e, 0x15, 0x57, + 0x35, 0xe3, 0xdb, 0x88, 0xa8, 0x88, 0xd0, 0x42, 0x1f, 0x4c, 0x33, 0xa5, 0xaa, 0xfc, 0xbc, 0x7b, + 0xce, 0x49, 0xf3, 0xec, 0x64, 0xe3, 0x64, 0x12, 0xf0, 0x11, 0x28, 0x49, 0x12, 0x75, 0xfa, 0x2e, + 0x0d, 0xbc, 0x3b, 0x24, 0xfe, 0xc8, 0x04, 0xf9, 0xf1, 0x73, 0x96, 0x25, 0x45, 0x96, 0xec, 0x94, + 0xc2, 0xe8, 0x69, 0xa0, 0x3a, 0xfa, 0xe4, 0xc0, 0x0d, 0x50, 0xe2, 0x94, 0x63, 0x4f, 0x8d, 0x75, + 0xf9, 0x74, 0xa9, 0xf4, 0xc4, 0x65, 0x62, 0x2f, 0x04, 0xda, 0x4e, 0x49, 0x60, 0x0c, 0x40, 0x80, + 0x7d, 0xc2, 0x42, 0xec, 0x90, 0x7e, 0x26, 0xd6, 0xc7, 0x70, 0x6a, 0x98, 0x42, 0xf7, 0x49, 0x9f, + 0x29, 0xbf, 0xaa, 0xb2, 0x12, 0xb3, 0x07, 0x84, 0x8c, 0x4f, 0x1a, 0xd0, 0x4f, 0xa6, 0x80, 0x26, + 0x28, 0x67, 0x80, 0x34, 0x10, 0xd6, 0xbc, 0x62, 0x2d, 0x67, 0xbb, 0xec, 0x7c, 0x0f, 0xdc, 0xcc, + 0x6e, 0xa8, 0xe2, 0x9f, 0x39, 0x33, 0xe2, 0x2e, 0xb2, 0x1e, 0xec, 0xf6, 0xf4, 0xc2, 0x5e, 0x4f, + 0x2f, 0xec, 0xf7, 0xf4, 0xc2, 0xfb, 0x44, 0xd7, 0x76, 0x13, 0x5d, 0xdb, 0x4b, 0x74, 0x6d, 0x3f, + 0xd1, 0xb5, 0x1f, 0x89, 0xae, 0x7d, 0xfc, 0xa9, 0x17, 0x5e, 0x2e, 0x9d, 0xfa, 0xe1, 0xf4, 0x3b, + 0x00, 0x00, 0xff, 0xff, 0xda, 0x49, 0x50, 0x7b, 0x5c, 0x09, 0x00, 0x00, +} + +func (m *AppliedClusterResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppliedClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppliedClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ 
= i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AppliedClusterResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppliedClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppliedClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AnnotationSelector) > 0 { + keysForAnnotationSelector := make([]string, 0, len(m.AnnotationSelector)) + for k := range m.AnnotationSelector { + keysForAnnotationSelector = append(keysForAnnotationSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector) + for iNdEx := len(keysForAnnotationSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.AnnotationSelector[string(keysForAnnotationSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotationSelector[iNdEx]) + copy(dAtA[i:], keysForAnnotationSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotationSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } + } + { + size, err := m.Total.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaStatusByNamespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaStatusByNamespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatusByNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AppliedClusterResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AppliedClusterResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterResourceQuotaSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AnnotationSelector) > 0 { + for k, v := range m.AnnotationSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ClusterResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Quota.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Total.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Namespaces) > 0 { + for _, e := range m.Namespaces { + l = e.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatusByNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AppliedClusterResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppliedClusterResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AppliedClusterResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]AppliedClusterResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppliedClusterResourceQuota", "AppliedClusterResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&AppliedClusterResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterResourceQuota", "ClusterResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaSelector) String() string { + if this == nil { + return "nil" + } + keysForAnnotationSelector := make([]string, 0, len(this.AnnotationSelector)) + for k := range this.AnnotationSelector { + keysForAnnotationSelector = append(keysForAnnotationSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector) + mapStringForAnnotationSelector := "map[string]string{" + for _, k := range keysForAnnotationSelector { + mapStringForAnnotationSelector += fmt.Sprintf("%v: %v,", k, 
this.AnnotationSelector[k]) + } + mapStringForAnnotationSelector += "}" + s := strings.Join([]string{`&ClusterResourceQuotaSelector{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AnnotationSelector:` + mapStringForAnnotationSelector + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterResourceQuotaSpec{`, + `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "ClusterResourceQuotaSelector", "ClusterResourceQuotaSelector", 1), `&`, ``, 1) + `,`, + `Quota:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Quota), "ResourceQuotaSpec", "v11.ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForNamespaces := "[]ResourceQuotaStatusByNamespace{" + for _, f := range this.Namespaces { + repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "ResourceQuotaStatusByNamespace", "ResourceQuotaStatusByNamespace", 1), `&`, ``, 1) + "," + } + repeatedStringForNamespaces += "}" + s := strings.Join([]string{`&ClusterResourceQuotaStatus{`, + `Total:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Total), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `Namespaces:` + repeatedStringForNamespaces + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatusByNamespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuotaStatusByNamespace{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AppliedClusterResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppliedClusterResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppliedClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AppliedClusterResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppliedClusterResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppliedClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AppliedClusterResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnnotationSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AnnotationSelector == nil { + m.AnnotationSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AnnotationSelector[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) 
|| (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Total.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, ResourceQuotaStatusByNamespace{}) + if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatusByNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto new file mode 100644 index 0000000000000..fb7fed242a86e --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/generated.proto @@ -0,0 +1,130 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.quota.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/quota/v1"; + +// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection +// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to +// his project and their associated usage. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message AppliedClusterResourceQuota { + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the desired quota + optional ClusterResourceQuotaSpec spec = 2; + + // status defines the actual enforced quota and its current usage + optional ClusterResourceQuotaStatus status = 3; +} + +// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message AppliedClusterResourceQuotaList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of AppliedClusterResourceQuota + repeated AppliedClusterResourceQuota items = 2; +} + +// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to +// synthetic ResourceQuota object to allow quota evaluation re-use. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterresourcequotas,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +message ClusterResourceQuota { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the desired quota + optional ClusterResourceQuotaSpec spec = 2; + + // status defines the actual enforced quota and its current usage + optional ClusterResourceQuotaStatus status = 3; +} + +// ClusterResourceQuotaList is a collection of ClusterResourceQuotas +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterResourceQuotaList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of ClusterResourceQuotas + repeated ClusterResourceQuota items = 2; +} + +// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector +// must present. If only one is present, it is the only selection criteria. If both are specified, +// the project must match both restrictions. +message ClusterResourceQuotaSelector { + // LabelSelector is used to select projects by label. + // +optional + // +nullable + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 1; + + // AnnotationSelector is used to select projects by annotation. 
+ // +optional + // +nullable + map annotations = 2; +} + +// ClusterResourceQuotaSpec defines the desired quota restrictions +message ClusterResourceQuotaSpec { + // selector is the selector used to match projects. + // It should only select active projects on the scale of dozens (though it can select + // many more less active projects). These projects will contend on object creation through + // this resource. + optional ClusterResourceQuotaSelector selector = 1; + + // quota defines the desired quota + optional .k8s.io.api.core.v1.ResourceQuotaSpec quota = 2; +} + +// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage +message ClusterResourceQuotaStatus { + // total defines the actual enforced quota and its current usage across all projects + optional .k8s.io.api.core.v1.ResourceQuotaStatus total = 1; + + // namespaces slices the usage by project. This division allows for quick resolution of + // deletion reconciliation inside of a single project without requiring a recalculation + // across all projects. This can be used to pull the deltas for a given project. + // +optional + // +nullable + repeated ResourceQuotaStatusByNamespace namespaces = 2; +} + +// ResourceQuotaStatusByNamespace gives status for a particular project +message ResourceQuotaStatusByNamespace { + // namespace the project this status applies to + optional string namespace = 1; + + // status indicates how many resources have been consumed by this project + optional .k8s.io.api.core.v1.ResourceQuotaStatus status = 2; +} + diff --git a/vendor/github.com/openshift/api/quota/v1/legacy.go b/vendor/github.com/openshift/api/quota/v1/legacy.go new file mode 100644 index 0000000000000..402690b5d60c5 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/legacy.go @@ -0,0 +1,24 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &ClusterResourceQuota{}, + &ClusterResourceQuotaList{}, + &AppliedClusterResourceQuota{}, + &AppliedClusterResourceQuotaList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
+ return nil +} diff --git a/vendor/github.com/openshift/api/quota/v1/register.go b/vendor/github.com/openshift/api/quota/v1/register.go new file mode 100644 index 0000000000000..47c774ef23ec0 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/register.go @@ -0,0 +1,41 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "quota.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceQuota{}, + &ClusterResourceQuotaList{}, + &AppliedClusterResourceQuota{}, + &AppliedClusterResourceQuotaList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go new file mode 100644 index 0000000000000..0cfb85f87e4a1 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/types.go @@ -0,0 +1,145 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to +// synthetic ResourceQuota object to allow quota evaluation re-use. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterresourcequotas,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type ClusterResourceQuota struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the desired quota + Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status defines the actual enforced quota and its current usage + Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ClusterResourceQuotaSpec defines the desired quota restrictions +type ClusterResourceQuotaSpec struct { + // selector is the selector used to match projects. + // It should only select active projects on the scale of dozens (though it can select + // many more less active projects). These projects will contend on object creation through + // this resource. + Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // quota defines the desired quota + Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"` +} + +// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector +// must be present. If only one is present, it is the only selection criterion. If both are specified, +// the project must match both restrictions. +type ClusterResourceQuotaSelector struct { + // LabelSelector is used to select projects by label. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labels" protobuf:"bytes,1,opt,name=labels"` + + // AnnotationSelector is used to select projects by annotation. + // +optional + // +nullable + AnnotationSelector map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"` +} + +// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage +type ClusterResourceQuotaStatus struct { + // total defines the actual enforced quota and its current usage across all projects + Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"` + + // namespaces slices the usage by project. This division allows for quick resolution of + // deletion reconciliation inside of a single project without requiring a recalculation + // across all projects. This can be used to pull the deltas for a given project. + // +optional + // +nullable + Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceQuotaList is a collection of ClusterResourceQuotas +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
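// A hedged illustration of the types above; the object name, annotation key,
// and limits are invented, and k8s.io/apimachinery/pkg/api/resource is
// assumed to be imported:
//
//	crq := ClusterResourceQuota{
//	    ObjectMeta: metav1.ObjectMeta{Name: "for-user-alice"},
//	    Spec: ClusterResourceQuotaSpec{
//	        Selector: ClusterResourceQuotaSelector{
//	            // match every project carrying this annotation
//	            AnnotationSelector: map[string]string{"openshift.io/requester": "alice"},
//	        },
//	        Quota: corev1.ResourceQuotaSpec{
//	            Hard: corev1.ResourceList{corev1.ResourcePods: resource.MustParse("10")},
//	        },
//	    },
//	}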
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of ClusterResourceQuotas + Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace +type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace + +// ResourceQuotaStatusByNamespace gives status for a particular project +type ResourceQuotaStatusByNamespace struct { + // namespace the project this status applies to + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // status indicates how many resources have been consumed by this project + Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient +// +genclient:onlyVerbs=get,list +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection +// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to +// his project and their associated usage. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AppliedClusterResourceQuota struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the desired quota + Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status defines the actual enforced quota and its current usage + Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AppliedClusterResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of AppliedClusterResourceQuota + Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..72ac882fbd45d --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go @@ -0,0 +1,242 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
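// The generated methods below supply DeepCopyObject, which together with the
// embedded TypeMeta lets these structs satisfy runtime.Object. The practical
// contract (a typical, assumed call site) is copy-before-write:
//
//	obj := cached.DeepCopy() // cached is a shared *AppliedClusterResourceQuota that must not be mutated
//	obj.Status = ClusterResourceQuotaStatus{} // safe: obj shares no pointers with cached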
+func (in *AppliedClusterResourceQuota) DeepCopyInto(out *AppliedClusterResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuota. +func (in *AppliedClusterResourceQuota) DeepCopy() *AppliedClusterResourceQuota { + if in == nil { + return nil + } + out := new(AppliedClusterResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppliedClusterResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppliedClusterResourceQuotaList) DeepCopyInto(out *AppliedClusterResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppliedClusterResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuotaList. +func (in *AppliedClusterResourceQuotaList) DeepCopy() *AppliedClusterResourceQuotaList { + if in == nil { + return nil + } + out := new(AppliedClusterResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppliedClusterResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuota) DeepCopyInto(out *ClusterResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuota. +func (in *ClusterResourceQuota) DeepCopy() *ClusterResourceQuota { + if in == nil { + return nil + } + out := new(ClusterResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaList) DeepCopyInto(out *ClusterResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaList. 
+func (in *ClusterResourceQuotaList) DeepCopy() *ClusterResourceQuotaList { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaSelector) DeepCopyInto(out *ClusterResourceQuotaSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AnnotationSelector != nil { + in, out := &in.AnnotationSelector, &out.AnnotationSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSelector. +func (in *ClusterResourceQuotaSelector) DeepCopy() *ClusterResourceQuotaSelector { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaSpec) DeepCopyInto(out *ClusterResourceQuotaSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.Quota.DeepCopyInto(&out.Quota) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSpec. +func (in *ClusterResourceQuotaSpec) DeepCopy() *ClusterResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaStatus) DeepCopyInto(out *ClusterResourceQuotaStatus) { + *out = *in + in.Total.DeepCopyInto(&out.Total) + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaStatus. +func (in *ClusterResourceQuotaStatus) DeepCopy() *ClusterResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace. +func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotaStatusByNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
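// ResourceQuotasStatusByNamespace is a named slice rather than a struct, so
// its generated functions below differ from the ones above: the receiver is
// a value (a slice header is cheap to copy), and DeepCopy returns the new
// slice directly instead of a pointer to it.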
+func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) { + { + in := &in + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace. +func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotasStatusByNamespace) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..b0fae46f7d449 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,22 @@ +clusterresourcequotas.quota.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: clusterresourcequotas.quota.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: quota.openshift.io + HasStatus: true + KindName: ClusterResourceQuota + Labels: {} + PluralName: clusterresourcequotas + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..1bb84b8176426 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,96 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AppliedClusterResourceQuota = map[string]string{ + "": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the desired quota", + "status": "status defines the actual enforced quota and its current usage", +} + +func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { + return map_AppliedClusterResourceQuota +} + +var map_AppliedClusterResourceQuotaList = map[string]string{ + "": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of AppliedClusterResourceQuota", +} + +func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { + return map_AppliedClusterResourceQuotaList +} + +var map_ClusterResourceQuota = map[string]string{ + "": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to a synthetic ResourceQuota object to allow quota evaluation re-use.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the desired quota", + "status": "status defines the actual enforced quota and its current usage", +} + +func (ClusterResourceQuota) SwaggerDoc() map[string]string { + return map_ClusterResourceQuota +} + +var map_ClusterResourceQuotaList = map[string]string{ + "": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterResourceQuotas", +} + +func (ClusterResourceQuotaList) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaList +} + +var map_ClusterResourceQuotaSelector = map[string]string{ + "": "ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector must be present. If only one is present, it is the only selection criterion. If both are specified, the project must match both restrictions.", + "labels": "LabelSelector is used to select projects by label.", + "annotations": "AnnotationSelector is used to select projects by annotation.", +} + +func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaSelector +} + +var map_ClusterResourceQuotaSpec = map[string]string{ + "": "ClusterResourceQuotaSpec defines the desired quota restrictions", + "selector": "selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). 
These projects will contend on object creation through this resource.", + "quota": "quota defines the desired quota", +} + +func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaSpec +} + +var map_ClusterResourceQuotaStatus = map[string]string{ + "": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage", + "total": "total defines the actual enforced quota and its current usage across all projects", + "namespaces": "namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", +} + +func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaStatus +} + +var map_ResourceQuotaStatusByNamespace = map[string]string{ + "": "ResourceQuotaStatusByNamespace gives status for a particular project", + "namespace": "namespace the project this status applies to", + "status": "status indicates how many resources have been consumed by this project", +} + +func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatusByNamespace +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/route/v1/Makefile b/vendor/github.com/openshift/api/route/v1/Makefile new file mode 100644 index 0000000000000..0e605762078bb --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="route.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/route/v1/doc.go b/vendor/github.com/openshift/api/route/v1/doc.go new file mode 100644 index 0000000000000..e56fbbd8d1fe3 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/route/apis/route +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=route.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go new file mode 100644 index 0000000000000..2adcd1cc86920 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -0,0 +1,4276 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/route/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
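// The generated marshal code that follows implements the protobuf wire
// format directly: each field is introduced by a key byte equal to
// (field_number << 3) | wire_type. For example, the 0x1a written for
// Route.Status below is (3 << 3) | 2, i.e. field 3 with wire type 2
// (length-delimited).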
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{0} +} +func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalObjectReference.Merge(m, src) +} +func (m *LocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *LocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo + +func (m *Route) Reset() { *m = Route{} } +func (*Route) ProtoMessage() {} +func (*Route) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{1} +} +func (m *Route) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Route) XXX_Merge(src proto.Message) { + xxx_messageInfo_Route.Merge(m, src) +} +func (m *Route) XXX_Size() int { + return m.Size() +} +func (m *Route) XXX_DiscardUnknown() { + xxx_messageInfo_Route.DiscardUnknown(m) +} + +var xxx_messageInfo_Route proto.InternalMessageInfo + +func (m *RouteHTTPHeader) Reset() { *m = RouteHTTPHeader{} } +func (*RouteHTTPHeader) ProtoMessage() {} +func (*RouteHTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{2} +} +func (m *RouteHTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeader.Merge(m, src) +} +func (m *RouteHTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeader proto.InternalMessageInfo + +func (m *RouteHTTPHeaderActionUnion) Reset() { *m = RouteHTTPHeaderActionUnion{} } +func (*RouteHTTPHeaderActionUnion) ProtoMessage() {} +func (*RouteHTTPHeaderActionUnion) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{3} +} +func (m *RouteHTTPHeaderActionUnion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeaderActionUnion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeaderActionUnion) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeaderActionUnion.Merge(m, src) +} +func (m *RouteHTTPHeaderActionUnion) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeaderActionUnion) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeaderActionUnion.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeaderActionUnion 
proto.InternalMessageInfo + +func (m *RouteHTTPHeaderActions) Reset() { *m = RouteHTTPHeaderActions{} } +func (*RouteHTTPHeaderActions) ProtoMessage() {} +func (*RouteHTTPHeaderActions) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{4} +} +func (m *RouteHTTPHeaderActions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeaderActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeaderActions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeaderActions.Merge(m, src) +} +func (m *RouteHTTPHeaderActions) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeaderActions) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeaderActions.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeaderActions proto.InternalMessageInfo + +func (m *RouteHTTPHeaders) Reset() { *m = RouteHTTPHeaders{} } +func (*RouteHTTPHeaders) ProtoMessage() {} +func (*RouteHTTPHeaders) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{5} +} +func (m *RouteHTTPHeaders) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeaders) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeaders) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeaders.Merge(m, src) +} +func (m *RouteHTTPHeaders) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeaders) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeaders.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeaders proto.InternalMessageInfo + +func (m *RouteIngress) Reset() { *m = RouteIngress{} } +func (*RouteIngress) ProtoMessage() {} +func (*RouteIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{6} +} +func (m *RouteIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteIngress.Merge(m, src) +} +func (m *RouteIngress) XXX_Size() int { + return m.Size() +} +func (m *RouteIngress) XXX_DiscardUnknown() { + xxx_messageInfo_RouteIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteIngress proto.InternalMessageInfo + +func (m *RouteIngressCondition) Reset() { *m = RouteIngressCondition{} } +func (*RouteIngressCondition) ProtoMessage() {} +func (*RouteIngressCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{7} +} +func (m *RouteIngressCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteIngressCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteIngressCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteIngressCondition.Merge(m, src) +} +func (m *RouteIngressCondition) XXX_Size() int { + return m.Size() +} +func (m *RouteIngressCondition) XXX_DiscardUnknown() { + xxx_messageInfo_RouteIngressCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteIngressCondition 
proto.InternalMessageInfo + +func (m *RouteList) Reset() { *m = RouteList{} } +func (*RouteList) ProtoMessage() {} +func (*RouteList) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{8} +} +func (m *RouteList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteList.Merge(m, src) +} +func (m *RouteList) XXX_Size() int { + return m.Size() +} +func (m *RouteList) XXX_DiscardUnknown() { + xxx_messageInfo_RouteList.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteList proto.InternalMessageInfo + +func (m *RoutePort) Reset() { *m = RoutePort{} } +func (*RoutePort) ProtoMessage() {} +func (*RoutePort) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{9} +} +func (m *RoutePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoutePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoutePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoutePort.Merge(m, src) +} +func (m *RoutePort) XXX_Size() int { + return m.Size() +} +func (m *RoutePort) XXX_DiscardUnknown() { + xxx_messageInfo_RoutePort.DiscardUnknown(m) +} + +var xxx_messageInfo_RoutePort proto.InternalMessageInfo + +func (m *RouteSetHTTPHeader) Reset() { *m = RouteSetHTTPHeader{} } +func (*RouteSetHTTPHeader) ProtoMessage() {} +func (*RouteSetHTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{10} +} +func (m *RouteSetHTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteSetHTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteSetHTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteSetHTTPHeader.Merge(m, src) +} +func (m *RouteSetHTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *RouteSetHTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RouteSetHTTPHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteSetHTTPHeader proto.InternalMessageInfo + +func (m *RouteSpec) Reset() { *m = RouteSpec{} } +func (*RouteSpec) ProtoMessage() {} +func (*RouteSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{11} +} +func (m *RouteSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteSpec.Merge(m, src) +} +func (m *RouteSpec) XXX_Size() int { + return m.Size() +} +func (m *RouteSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RouteSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteSpec proto.InternalMessageInfo + +func (m *RouteStatus) Reset() { *m = RouteStatus{} } +func (*RouteStatus) ProtoMessage() {} +func (*RouteStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{12} +} +func (m *RouteStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*RouteStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteStatus.Merge(m, src) +} +func (m *RouteStatus) XXX_Size() int { + return m.Size() +} +func (m *RouteStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RouteStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteStatus proto.InternalMessageInfo + +func (m *RouteTargetReference) Reset() { *m = RouteTargetReference{} } +func (*RouteTargetReference) ProtoMessage() {} +func (*RouteTargetReference) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{13} +} +func (m *RouteTargetReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteTargetReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteTargetReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteTargetReference.Merge(m, src) +} +func (m *RouteTargetReference) XXX_Size() int { + return m.Size() +} +func (m *RouteTargetReference) XXX_DiscardUnknown() { + xxx_messageInfo_RouteTargetReference.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteTargetReference proto.InternalMessageInfo + +func (m *RouterShard) Reset() { *m = RouterShard{} } +func (*RouterShard) ProtoMessage() {} +func (*RouterShard) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{14} +} +func (m *RouterShard) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouterShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouterShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouterShard.Merge(m, src) +} +func (m *RouterShard) XXX_Size() int { + return m.Size() +} +func (m *RouterShard) XXX_DiscardUnknown() { + xxx_messageInfo_RouterShard.DiscardUnknown(m) +} + +var xxx_messageInfo_RouterShard proto.InternalMessageInfo + +func (m *TLSConfig) Reset() { *m = TLSConfig{} } +func (*TLSConfig) ProtoMessage() {} +func (*TLSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{15} +} +func (m *TLSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TLSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TLSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TLSConfig.Merge(m, src) +} +func (m *TLSConfig) XXX_Size() int { + return m.Size() +} +func (m *TLSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TLSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TLSConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LocalObjectReference)(nil), "github.com.openshift.api.route.v1.LocalObjectReference") + proto.RegisterType((*Route)(nil), "github.com.openshift.api.route.v1.Route") + proto.RegisterType((*RouteHTTPHeader)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeader") + proto.RegisterType((*RouteHTTPHeaderActionUnion)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeaderActionUnion") + proto.RegisterType((*RouteHTTPHeaderActions)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeaderActions") + 
proto.RegisterType((*RouteHTTPHeaders)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeaders") + proto.RegisterType((*RouteIngress)(nil), "github.com.openshift.api.route.v1.RouteIngress") + proto.RegisterType((*RouteIngressCondition)(nil), "github.com.openshift.api.route.v1.RouteIngressCondition") + proto.RegisterType((*RouteList)(nil), "github.com.openshift.api.route.v1.RouteList") + proto.RegisterType((*RoutePort)(nil), "github.com.openshift.api.route.v1.RoutePort") + proto.RegisterType((*RouteSetHTTPHeader)(nil), "github.com.openshift.api.route.v1.RouteSetHTTPHeader") + proto.RegisterType((*RouteSpec)(nil), "github.com.openshift.api.route.v1.RouteSpec") + proto.RegisterType((*RouteStatus)(nil), "github.com.openshift.api.route.v1.RouteStatus") + proto.RegisterType((*RouteTargetReference)(nil), "github.com.openshift.api.route.v1.RouteTargetReference") + proto.RegisterType((*RouterShard)(nil), "github.com.openshift.api.route.v1.RouterShard") + proto.RegisterType((*TLSConfig)(nil), "github.com.openshift.api.route.v1.TLSConfig") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/route/v1/generated.proto", fileDescriptor_373b8fa7ff738721) +} + +var fileDescriptor_373b8fa7ff738721 = []byte{ + // 1420 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xdd, 0x6e, 0x13, 0xc7, + 0x17, 0xcf, 0xc6, 0x76, 0x1c, 0x8f, 0xf9, 0x1c, 0xbe, 0x4c, 0x24, 0x6c, 0xd8, 0xbf, 0xf4, 0x17, + 0x54, 0x74, 0xdd, 0x04, 0x68, 0x41, 0x15, 0x17, 0x6c, 0x40, 0x10, 0x30, 0x21, 0x1a, 0xbb, 0xa0, + 0x22, 0x2a, 0x75, 0xb2, 0x3b, 0xb6, 0xa7, 0xb1, 0x67, 0x97, 0x99, 0x71, 0x20, 0x37, 0x15, 0x6a, + 0x5f, 0x80, 0xde, 0xf6, 0x15, 0xaa, 0xde, 0xf7, 0x11, 0xb8, 0xe4, 0x92, 0xde, 0x58, 0x8d, 0x7b, + 0xd9, 0x37, 0xc8, 0x55, 0x35, 0xb3, 0xe3, 0xdd, 0xb5, 0x63, 0x13, 0x07, 0xf5, 0xce, 0x7b, 0xce, + 0xf9, 0xfd, 0xce, 0xc7, 0x9c, 0x39, 0x67, 0x12, 0xb0, 0xdc, 0xa2, 0xb2, 0xdd, 0xdb, 0x74, 0xbc, + 0xa0, 0x5b, 0x0d, 0x42, 0xc2, 0x44, 0x9b, 0x36, 0x65, 0x15, 0x87, 0xb4, 0xca, 0x83, 0x9e, 0x24, + 0xd5, 0xed, 0xe5, 0x6a, 0x8b, 0x30, 0xc2, 0xb1, 0x24, 0xbe, 0x13, 0xf2, 0x40, 0x06, 0xf0, 0x52, + 0x02, 0x71, 0x62, 0x88, 0x83, 0x43, 0xea, 0x68, 0x88, 0xb3, 0xbd, 0xbc, 0xf4, 0x79, 0x8a, 0xb5, + 0x15, 0xb4, 0x82, 0xaa, 0x46, 0x6e, 0xf6, 0x9a, 0xfa, 0x4b, 0x7f, 0xe8, 0x5f, 0x11, 0xe3, 0x92, + 0xbd, 0x75, 0x53, 0x38, 0x34, 0xd0, 0x6e, 0xbd, 0x80, 0x4f, 0xf2, 0xba, 0x74, 0x3d, 0xb1, 0xe9, + 0x62, 0xaf, 0x4d, 0x19, 0xe1, 0x3b, 0xd5, 0x70, 0xab, 0xa5, 0x04, 0xa2, 0xda, 0x25, 0x12, 0x4f, + 0x42, 0x7d, 0x39, 0x0d, 0xc5, 0x7b, 0x4c, 0xd2, 0x2e, 0xa9, 0x0a, 0xaf, 0x4d, 0xba, 0x78, 0x1f, + 0xee, 0xda, 0x34, 0x5c, 0x4f, 0xd2, 0x4e, 0x95, 0x32, 0x29, 0x24, 0x1f, 0x07, 0xd9, 0x37, 0xc1, + 0xe9, 0x5a, 0xe0, 0xe1, 0xce, 0x93, 0xcd, 0x1f, 0x88, 0x27, 0x11, 0x69, 0x12, 0x4e, 0x98, 0x47, + 0xe0, 0x45, 0x90, 0x65, 0xb8, 0x4b, 0x4a, 0xd6, 0x45, 0xeb, 0x72, 0xc1, 0x3d, 0xf2, 0xae, 0x5f, + 0x99, 0x1b, 0xf4, 0x2b, 0xd9, 0x75, 0xdc, 0x25, 0x48, 0x6b, 0xec, 0x5f, 0xe6, 0x41, 0x0e, 0xa9, + 0xe2, 0xc1, 0xef, 0xc1, 0xa2, 0xca, 0xc5, 0xc7, 0x12, 0x6b, 0xfb, 0xe2, 0xca, 0x17, 0x4e, 0x14, + 0x8b, 0x93, 0x8e, 0xc5, 0x09, 0xb7, 0x5a, 0x4a, 0x20, 0x1c, 0x65, 0xed, 0x6c, 0x2f, 0x3b, 0x91, + 0xd3, 0xc7, 0x44, 0x62, 0x17, 0x1a, 0x0f, 0x20, 0x91, 0xa1, 0x98, 0x15, 0xae, 0x83, 0xac, 0x08, + 0x89, 0x57, 0x9a, 0xd7, 0xec, 0x57, 0x9d, 0x03, 0x4f, 0xd3, 0xd1, 0x91, 0xd5, 0x43, 0xe2, 0x25, + 0xb1, 0xab, 0x2f, 0xa4, 0x79, 0xe0, 0x53, 0xb0, 0x20, 0x24, 0x96, 0x3d, 0x51, 0xca, 0x68, 0x46, + 0x67, 0x66, 0x46, 0x8d, 
0x72, 0x8f, 0x19, 0xce, 0x85, 0xe8, 0x1b, 0x19, 0x36, 0xfb, 0x57, 0x0b, + 0x1c, 0xd7, 0x76, 0x0f, 0x1a, 0x8d, 0x8d, 0x07, 0x04, 0xfb, 0x84, 0x1f, 0x5c, 0x49, 0x48, 0xc0, + 0x02, 0xf6, 0x24, 0x0d, 0x98, 0xc9, 0xef, 0xf6, 0xac, 0xd1, 0x24, 0x5e, 0xee, 0x68, 0xfc, 0x37, + 0x8c, 0x06, 0x2c, 0x09, 0x2e, 0x12, 0x22, 0x43, 0x6e, 0xff, 0x6e, 0x81, 0xa5, 0xe9, 0x30, 0x78, + 0x1b, 0x64, 0xe5, 0x4e, 0x38, 0x8c, 0xf3, 0xca, 0x30, 0xce, 0xc6, 0x4e, 0x48, 0xf6, 0xfa, 0x95, + 0xf3, 0x13, 0x91, 0x4a, 0x89, 0x34, 0x0c, 0x6e, 0x80, 0x8c, 0x20, 0xd2, 0x64, 0x70, 0x63, 0xe6, + 0x7a, 0x12, 0x99, 0x70, 0xba, 0xf9, 0x41, 0xbf, 0x92, 0xa9, 0x13, 0x89, 0x14, 0x95, 0xfd, 0xa7, + 0x05, 0xce, 0x4e, 0xf4, 0x2a, 0x54, 0xc7, 0x71, 0x22, 0xc2, 0x80, 0x09, 0x15, 0x6f, 0xe6, 0x72, + 0x71, 0x65, 0xe5, 0xf0, 0x35, 0x73, 0x4f, 0x98, 0x1c, 0x17, 0x91, 0xe1, 0x42, 0x31, 0x2b, 0xfc, + 0x0e, 0xe4, 0x39, 0x79, 0xd9, 0x23, 0x42, 0xa5, 0xf4, 0xa9, 0x0e, 0x8e, 0x1b, 0x07, 0x79, 0x14, + 0x51, 0xa1, 0x21, 0xa7, 0xfd, 0x1a, 0x9c, 0x18, 0x33, 0x16, 0xd0, 0x07, 0xf9, 0xe8, 0xa4, 0x84, + 0xb9, 0x45, 0xb7, 0x3e, 0xb5, 0x0f, 0x44, 0xe2, 0xd9, 0x08, 0xd0, 0x90, 0xda, 0xfe, 0x39, 0x03, + 0x8e, 0x68, 0xd0, 0x1a, 0x6b, 0x71, 0x22, 0x84, 0xea, 0xcf, 0x76, 0x20, 0xe4, 0x78, 0x7f, 0x3e, + 0x08, 0x84, 0x44, 0x5a, 0x03, 0x57, 0x00, 0xd0, 0xfe, 0xb8, 0xea, 0x59, 0x7d, 0xc2, 0x85, 0xe4, + 0xbe, 0xa2, 0x58, 0x83, 0x52, 0x56, 0xb0, 0x03, 0x80, 0x17, 0x30, 0x9f, 0x46, 0xf9, 0x64, 0x74, + 0x09, 0x6f, 0xce, 0x9a, 0x8f, 0x09, 0x6d, 0x75, 0x48, 0x90, 0x78, 0x8b, 0x45, 0x02, 0xa5, 0xf8, + 0x61, 0x03, 0x1c, 0x7b, 0x45, 0x3b, 0xbe, 0x87, 0xb9, 0xbf, 0x11, 0x74, 0xa8, 0xb7, 0x53, 0xca, + 0xea, 0x28, 0xaf, 0x1a, 0xdc, 0xb1, 0x67, 0x23, 0xda, 0xbd, 0x7e, 0x05, 0x8e, 0x4a, 0x74, 0x23, + 0x8f, 0x71, 0xc0, 0x6f, 0xc1, 0xb9, 0x28, 0xa3, 0x55, 0xcc, 0x02, 0x46, 0x3d, 0xdc, 0x51, 0x45, + 0xd1, 0x97, 0x39, 0xa7, 0xe9, 0x2b, 0x86, 0xfe, 0x1c, 0x9a, 0x6c, 0x86, 0xa6, 0xe1, 0xed, 0x7f, + 0xe6, 0xc1, 0x99, 0x89, 0xa9, 0xce, 0x74, 0x0d, 0xc7, 0x41, 0xa9, 0x6b, 0x58, 0x8b, 0x27, 0x5b, + 0x74, 0x4e, 0xd7, 0x47, 0x27, 0xd5, 0x5e, 0xbf, 0x32, 0x61, 0x71, 0x39, 0x31, 0xd3, 0xe8, 0x3c, + 0x83, 0xff, 0x07, 0x0b, 0x9c, 0x60, 0x11, 0x30, 0x3d, 0x27, 0x0b, 0xc9, 0x68, 0x41, 0x5a, 0x8a, + 0x8c, 0x16, 0x5e, 0x01, 0xf9, 0x2e, 0x11, 0x02, 0xb7, 0x88, 0x29, 0x7c, 0xdc, 0x7f, 0x8f, 0x23, + 0x31, 0x1a, 0xea, 0x21, 0x07, 0xb0, 0x83, 0x85, 0x6c, 0x70, 0xcc, 0x44, 0x14, 0x3c, 0x35, 0xf5, + 0x2c, 0xae, 0x7c, 0x36, 0xdb, 0xda, 0x50, 0x08, 0xf7, 0xec, 0xa0, 0x5f, 0x81, 0xb5, 0x7d, 0x4c, + 0x68, 0x02, 0xbb, 0xfd, 0x87, 0x05, 0x0a, 0xba, 0x70, 0x35, 0x2a, 0x24, 0x7c, 0xb1, 0x6f, 0x5d, + 0x39, 0xb3, 0xf9, 0x55, 0x68, 0xbd, 0xac, 0xe2, 0xc1, 0x31, 0x94, 0xa4, 0x56, 0xd5, 0x63, 0x90, + 0xa3, 0x92, 0x74, 0x85, 0x19, 0x1b, 0x97, 0x67, 0xed, 0x79, 0xf7, 0xa8, 0x21, 0xcd, 0xad, 0x29, + 0x38, 0x8a, 0x58, 0xec, 0x97, 0x26, 0xf2, 0x8d, 0x80, 0x4b, 0xe8, 0x03, 0x20, 0x31, 0x6f, 0x11, + 0xa9, 0xbe, 0x0e, 0x5c, 0xb5, 0x6a, 0xed, 0x3b, 0xd1, 0xda, 0x77, 0xd6, 0x98, 0x7c, 0xc2, 0xeb, + 0x92, 0x53, 0xd6, 0x4a, 0x2e, 0x53, 0x23, 0xe6, 0x42, 0x29, 0x5e, 0xfb, 0x16, 0x80, 0xfb, 0x67, + 0x33, 0xfc, 0x1f, 0xc8, 0x6d, 0xe3, 0x4e, 0x6f, 0xd8, 0x98, 0x71, 0xb4, 0x4f, 0x95, 0x10, 0x45, + 0x3a, 0xfb, 0xb7, 0x9c, 0x09, 0x57, 0xed, 0xda, 0x19, 0x26, 0x4b, 0x15, 0x14, 0x44, 0x6f, 0xd3, + 0x0f, 0xba, 0x98, 0xb2, 0xd2, 0xa2, 0x36, 0x3b, 0x69, 0xcc, 0x0a, 0xf5, 0xa1, 0x02, 0x25, 0x36, + 0x8a, 0x32, 0xc4, 0xb2, 0x6d, 0x9a, 0x3b, 0xa6, 0xdc, 0xc0, 0xb2, 0x8d, 0xb4, 0x06, 0xd6, 0xc1, + 0xbc, 0x0c, 0xcc, 0x5a, 0xff, 0x6a, 0xd6, 0xe2, 
0x47, 0x95, 0x88, 0x5f, 0x3f, 0x2e, 0x30, 0xc4, + 0xf3, 0x8d, 0x00, 0xcd, 0xcb, 0x00, 0xbe, 0xb1, 0xc0, 0x49, 0xdc, 0x91, 0x84, 0x33, 0x2c, 0x89, + 0x8b, 0xbd, 0x2d, 0xc2, 0x7c, 0x51, 0xca, 0xea, 0x13, 0xfe, 0x64, 0x27, 0xe7, 0x8d, 0x93, 0x93, + 0x77, 0xc6, 0x99, 0xd1, 0x7e, 0x67, 0xf0, 0x21, 0xc8, 0x86, 0xea, 0xd4, 0x73, 0x87, 0x7b, 0x02, + 0xa9, 0x13, 0x75, 0x17, 0x75, 0x8d, 0xd4, 0x39, 0x6b, 0x0e, 0x78, 0x1f, 0x64, 0x64, 0x47, 0x94, + 0x16, 0x66, 0xa6, 0x6a, 0xd4, 0xea, 0xab, 0x01, 0x6b, 0xd2, 0x56, 0xb4, 0xa2, 0x1b, 0xb5, 0x3a, + 0x52, 0x0c, 0x13, 0xe6, 0x6e, 0xfe, 0x3f, 0x98, 0xbb, 0x4d, 0x50, 0x6c, 0x4b, 0x19, 0x9a, 0xbd, + 0x58, 0x2a, 0xe8, 0x30, 0xaf, 0x1d, 0x7e, 0x19, 0x0a, 0xf7, 0xf8, 0xa0, 0x5f, 0x29, 0xa6, 0x04, + 0x28, 0x4d, 0x6c, 0x53, 0x50, 0x4c, 0x3d, 0xea, 0xe0, 0x73, 0x90, 0xa7, 0xd1, 0x60, 0x35, 0x6f, + 0x8a, 0xea, 0x21, 0xf7, 0x55, 0x32, 0xf5, 0x8c, 0x00, 0x0d, 0x09, 0xed, 0x1f, 0xc1, 0xe9, 0x49, + 0x3d, 0xa0, 0xfa, 0x79, 0x8b, 0x32, 0x7f, 0xfc, 0x8a, 0x3c, 0xa2, 0xcc, 0x47, 0x5a, 0x13, 0x3f, + 0x1f, 0xe7, 0xa7, 0x3e, 0x1f, 0x6d, 0xb0, 0xf0, 0x8a, 0xd0, 0x56, 0x5b, 0xea, 0xae, 0xcf, 0xb9, + 0x40, 0x0d, 0xe8, 0x67, 0x5a, 0x82, 0x8c, 0xc6, 0x0e, 0x4c, 0xaa, 0xbc, 0xde, 0xc6, 0xdc, 0xd7, + 0xf7, 0x4e, 0xfd, 0x58, 0x4f, 0x1e, 0xa6, 0xc9, 0xbd, 0x1b, 0x2a, 0x50, 0x62, 0xa3, 0x00, 0x3e, + 0x13, 0xf5, 0x5e, 0xb3, 0x49, 0x5f, 0x9b, 0x50, 0x62, 0xc0, 0xdd, 0xf5, 0x7a, 0xa4, 0x40, 0x89, + 0x8d, 0xbd, 0x9b, 0x05, 0x85, 0xb8, 0x6b, 0xe0, 0x23, 0x50, 0x94, 0x84, 0x77, 0x29, 0xc3, 0xfa, + 0x99, 0x3b, 0xba, 0xdb, 0x8a, 0x8d, 0x44, 0xa5, 0x3a, 0xa4, 0x51, 0xab, 0xa7, 0x24, 0xba, 0x43, + 0xd2, 0x68, 0x78, 0x03, 0x14, 0x3d, 0xc2, 0x25, 0x6d, 0x52, 0x0f, 0xcb, 0x61, 0x61, 0x4e, 0x0d, + 0xc9, 0x56, 0x13, 0x15, 0x4a, 0xdb, 0xc1, 0x0b, 0x20, 0xb3, 0x45, 0x76, 0xcc, 0x22, 0x2b, 0x1a, + 0xf3, 0xcc, 0x23, 0xb2, 0x83, 0x94, 0x1c, 0x7e, 0x0d, 0x8e, 0x7a, 0x38, 0x05, 0x36, 0x8b, 0xec, + 0x8c, 0x31, 0x3c, 0xba, 0x7a, 0x27, 0xcd, 0x3c, 0x6a, 0x0b, 0x5f, 0x80, 0x92, 0x4f, 0x84, 0x34, + 0x11, 0x8e, 0x98, 0x9a, 0xa7, 0xc2, 0x45, 0xc3, 0x53, 0xba, 0x3b, 0xc5, 0x0e, 0x4d, 0x65, 0x80, + 0x6f, 0x2d, 0x70, 0x81, 0x32, 0x41, 0xbc, 0x1e, 0x27, 0xf7, 0xfc, 0x16, 0x49, 0x55, 0xc7, 0xdc, + 0xba, 0x05, 0xed, 0xe3, 0xa1, 0xf1, 0x71, 0x61, 0xed, 0x63, 0xc6, 0x7b, 0xfd, 0xca, 0xa5, 0x8f, + 0x1a, 0xe8, 0x8a, 0x7f, 0xdc, 0x21, 0xfc, 0xc9, 0x02, 0xa7, 0xc8, 0x6b, 0x3d, 0xa3, 0x3a, 0xe9, + 0x64, 0xf3, 0x33, 0xcf, 0xdd, 0x49, 0x7f, 0x75, 0xba, 0xe7, 0x06, 0xfd, 0xca, 0xa9, 0x7b, 0xfb, + 0x79, 0xd1, 0x24, 0x67, 0xee, 0xfd, 0x77, 0xbb, 0xe5, 0xb9, 0xf7, 0xbb, 0xe5, 0xb9, 0x0f, 0xbb, + 0xe5, 0xb9, 0x37, 0x83, 0xb2, 0xf5, 0x6e, 0x50, 0xb6, 0xde, 0x0f, 0xca, 0xd6, 0x87, 0x41, 0xd9, + 0xfa, 0x6b, 0x50, 0xb6, 0xde, 0xfe, 0x5d, 0x9e, 0x7b, 0x7e, 0xe9, 0xc0, 0xff, 0x16, 0xfc, 0x1b, + 0x00, 0x00, 0xff, 0xff, 0x62, 0x5d, 0xac, 0x2e, 0x51, 0x10, 0x00, 0x00, +} + +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Route) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Route) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Route) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeaderActionUnion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeaderActionUnion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeaderActionUnion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Set != nil { + { + size, err := m.Set.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeaderActions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeaderActions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeaderActions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Request) > 0 { + for iNdEx := len(m.Request) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Request[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Response) > 0 { + for iNdEx 
:= len(m.Response) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Response[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeaders) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeaders) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeaders) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Actions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RouterCanonicalHostname) + copy(dAtA[i:], m.RouterCanonicalHostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterCanonicalHostname))) + i-- + dAtA[i] = 0x2a + i -= len(m.WildcardPolicy) + copy(dAtA[i:], m.WildcardPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) + i-- + dAtA[i] = 0x22 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.RouterName) + copy(dAtA[i:], m.RouterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteIngressCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteIngressCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteIngressCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastTransitionTime != nil { + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + 
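// Note the back-to-front convention shared by every MarshalToSizedBuffer in
// this file: i starts at len(dAtA) and the buffer is filled from the end, so
// each string field appears as "i -= len(payload); copy; write the varint
// length; write the key byte", with fields emitted in descending
// field-number order.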
copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoutePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoutePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteSetHTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteSetHTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteSetHTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTPHeaders != nil { + { + size, err := m.HTTPHeaders.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x42 + i -= len(m.WildcardPolicy) + copy(dAtA[i:], m.WildcardPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) + i-- + dAtA[i] = 0x3a + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.AlternateBackends) > 0 { + for iNdEx := len(m.AlternateBackends) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AlternateBackends[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RouteTargetReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteTargetReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteTargetReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Weight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Weight)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouterShard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouterShard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouterShard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DNSSuffix) + copy(dAtA[i:], m.DNSSuffix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSSuffix))) + i-- + dAtA[i] = 0x12 + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TLSConfig) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExternalCertificate != nil { + { + size, err := m.ExternalCertificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.InsecureEdgeTerminationPolicy) + copy(dAtA[i:], m.InsecureEdgeTerminationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InsecureEdgeTerminationPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.DestinationCACertificate) + copy(dAtA[i:], m.DestinationCACertificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationCACertificate))) + i-- + dAtA[i] = 0x2a + i -= len(m.CACertificate) + copy(dAtA[i:], m.CACertificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CACertificate))) + i-- + dAtA[i] = 0x22 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + i -= len(m.Termination) + copy(dAtA[i:], m.Termination) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Termination))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Route) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteHTTPHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteHTTPHeaderActionUnion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Set != nil { + l = m.Set.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteHTTPHeaderActions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Response) > 0 { + for _, e := range m.Response { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Request) > 0 { + for _, e := range m.Request { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RouteHTTPHeaders) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Actions.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RouterName) + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WildcardPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RouterCanonicalHostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteIngressCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoutePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteSetHTTPHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AlternateBackends) > 0 { + for _, e := range m.AlternateBackends { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.WildcardPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 1 + l + sovGenerated(uint64(l)) + if m.HTTPHeaders != nil { + l = m.HTTPHeaders.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RouteTargetReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Weight != nil { + n += 1 + sovGenerated(uint64(*m.Weight)) + } + return n +} + +func (m *RouterShard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ShardName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DNSSuffix) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TLSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Termination) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CACertificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationCACertificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.InsecureEdgeTerminationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExternalCertificate != nil { + l = m.ExternalCertificate.Size() + n += 1 
+ l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Route) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Route{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RouteSpec", "RouteSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RouteStatus", "RouteStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteHTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "RouteHTTPHeaderActionUnion", "RouteHTTPHeaderActionUnion", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeaderActionUnion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteHTTPHeaderActionUnion{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Set:` + strings.Replace(this.Set.String(), "RouteSetHTTPHeader", "RouteSetHTTPHeader", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeaderActions) String() string { + if this == nil { + return "nil" + } + repeatedStringForResponse := "[]RouteHTTPHeader{" + for _, f := range this.Response { + repeatedStringForResponse += strings.Replace(strings.Replace(f.String(), "RouteHTTPHeader", "RouteHTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForResponse += "}" + repeatedStringForRequest := "[]RouteHTTPHeader{" + for _, f := range this.Request { + repeatedStringForRequest += strings.Replace(strings.Replace(f.String(), "RouteHTTPHeader", "RouteHTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForRequest += "}" + s := strings.Join([]string{`&RouteHTTPHeaderActions{`, + `Response:` + repeatedStringForResponse + `,`, + `Request:` + repeatedStringForRequest + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeaders) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteHTTPHeaders{`, + `Actions:` + strings.Replace(strings.Replace(this.Actions.String(), "RouteHTTPHeaderActions", "RouteHTTPHeaderActions", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]RouteIngressCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "RouteIngressCondition", "RouteIngressCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&RouteIngress{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `RouterName:` + fmt.Sprintf("%v", this.RouterName) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `RouterCanonicalHostname:` + fmt.Sprintf("%v", this.RouterCanonicalHostname) + `,`, + `}`, + }, "") + return s 
+} +func (this *RouteIngressCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteIngressCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Route{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Route", "Route", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RouteList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoutePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoutePort{`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteSetHTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteSetHTTPHeader{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *RouteSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForAlternateBackends := "[]RouteTargetReference{" + for _, f := range this.AlternateBackends { + repeatedStringForAlternateBackends += strings.Replace(strings.Replace(f.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + "," + } + repeatedStringForAlternateBackends += "}" + s := strings.Join([]string{`&RouteSpec{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + `,`, + `AlternateBackends:` + repeatedStringForAlternateBackends + `,`, + `Port:` + strings.Replace(this.Port.String(), "RoutePort", "RoutePort", 1) + `,`, + `TLS:` + strings.Replace(this.TLS.String(), "TLSConfig", "TLSConfig", 1) + `,`, + `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `HTTPHeaders:` + strings.Replace(this.HTTPHeaders.String(), "RouteHTTPHeaders", "RouteHTTPHeaders", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]RouteIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "RouteIngress", "RouteIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&RouteStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *RouteTargetReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteTargetReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Weight:` + valueToStringGenerated(this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *RouterShard) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouterShard{`, + `ShardName:` + fmt.Sprintf("%v", this.ShardName) + `,`, + `DNSSuffix:` + fmt.Sprintf("%v", this.DNSSuffix) + `,`, + `}`, + }, "") + return s +} +func (this *TLSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSConfig{`, + `Termination:` + fmt.Sprintf("%v", this.Termination) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `CACertificate:` + fmt.Sprintf("%v", this.CACertificate) + `,`, + `DestinationCACertificate:` + fmt.Sprintf("%v", this.DestinationCACertificate) + `,`, + `InsecureEdgeTerminationPolicy:` + fmt.Sprintf("%v", this.InsecureEdgeTerminationPolicy) + `,`, + `ExternalCertificate:` + strings.Replace(this.ExternalCertificate.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Route) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Route: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Route: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeaderActionUnion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeaderActionUnion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeaderActionUnion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RouteHTTPHeaderActionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Set == nil { + m.Set = &RouteSetHTTPHeader{} + } + if err := m.Set.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeaderActions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeaderActions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeaderActions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Response = append(m.Response, RouteHTTPHeader{}) + if err := m.Response[len(m.Response)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = append(m.Request, RouteHTTPHeader{}) + if err := m.Request[len(m.Request)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeaders) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeaders: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeaders: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Actions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RouterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, 
RouteIngressCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouterCanonicalHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RouterCanonicalHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteIngressCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteIngressCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteIngressCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RouteIngressConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteList: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Route{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoutePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoutePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoutePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteSetHTTPHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteSetHTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteSetHTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AlternateBackends", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AlternateBackends = append(m.AlternateBackends, RouteTargetReference{}) + if err := m.AlternateBackends[len(m.AlternateBackends)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &RoutePort{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPHeaders == nil { + m.HTTPHeaders = &RouteHTTPHeaders{} + } + if err := m.HTTPHeaders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, RouteIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteTargetReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteTargetReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteTargetReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Weight = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouterShard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouterShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouterShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSSuffix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSSuffix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Termination", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Termination = TLSTerminationType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationCACertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationCACertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureEdgeTerminationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InsecureEdgeTerminationPolicy = InsecureEdgeTerminationPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCertificate", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalCertificate == nil { + m.ExternalCertificate = &LocalObjectReference{} + } + if err := m.ExternalCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto new file mode 100644 index 0000000000000..d6e1845b401e5 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -0,0 +1,466 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.route.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/route/v1"; + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. 
+// +structType=atomic +message LocalObjectReference { + // name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + optional string name = 1; +} + +// A route allows developers to expose services through an HTTP(S) aware load balancing and proxy +// layer via a public DNS entry. The route may further specify TLS options and a certificate, or +// specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An +// administrator typically configures their router to be visible outside the cluster firewall, and +// may also add additional security, caching, or traffic controls on the service content. Routers +// usually talk directly to the service endpoints. +// +// Once a route is created, the `host` field may not be changed. Generally, routers use the oldest +// route with a given host when resolving conflicts. +// +// Routers are subject to additional customization and may support additional controls via the +// annotations field. +// +// Because administrators may configure multiple routers, the route status field is used to +// return information to clients about the names and states of the route under each router. +// If a client chooses a duplicate name, for instance, the route status conditions are used +// to indicate the route cannot be chosen. +// +// To enable HTTP/2 ALPN on a route it requires a custom +// (non-wildcard) certificate. This prevents connection coalescing by +// clients, notably web browsers. We do not support HTTP/2 ALPN on +// routes that use the default certificate because of the risk of +// connection re-use/coalescing. Routes that do not have their own +// custom certificate will not be HTTP/2 ALPN-enabled on either the +// frontend or the backend. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Route { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the route + // +kubebuilder:validation:XValidation:rule="!has(self.tls) || self.tls.termination != 'passthrough' || !has(self.httpHeaders)",message="header actions are not permitted when tls termination is passthrough." + optional RouteSpec spec = 2; + + // status is the current state of the route + // +optional + optional RouteStatus status = 3; +} + +// RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. +message RouteHTTPHeader { + // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. + // It must be no more than 255 characters in length. + // Header name must be unique. 
+ // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" + optional string name = 1; + + // action specifies actions to perform on headers, such as setting or deleting headers. + // +required + optional RouteHTTPHeaderActionUnion action = 2; +} + +// RouteHTTPHeaderActionUnion specifies an action to take on an HTTP header. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise" +// +union +message RouteHTTPHeaderActionUnion { + // type defines the type of the action to be applied on the header. + // Possible values are Set or Delete. + // Set allows you to set HTTP request and response headers. + // Delete allows you to delete HTTP request and response headers. + // +unionDiscriminator + // +kubebuilder:validation:Enum:=Set;Delete + // +required + optional string type = 1; + + // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. + // This field is required when type is Set and forbidden otherwise. + // +optional + // +unionMember + optional RouteSetHTTPHeader set = 2; +} + +// RouteHTTPHeaderActions defines configuration for actions on HTTP request and response headers. +message RouteHTTPHeaderActions { + // response is a list of HTTP response headers to modify. + // Currently, actions may define to either `Set` or `Delete` headers values. + // Actions defined here will modify the response headers of all requests made through a route. + // These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. + // Route actions will be executed before IngressController actions for response headers. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 response header actions may be configured. + // You can use this field to specify HTTP response headers that should be set or deleted + // when forwarding responses from your application to the client. + // Sample fetchers allowed are "res.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // Note: This field cannot be used if your route uses TLS passthrough. + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. 
+  // +listType=map
+  // +listMapKey=name
+  // +optional
+  // +kubebuilder:validation:MaxItems=20
+  // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64."
+  repeated RouteHTTPHeader response = 1;
+
+  // request is a list of HTTP request headers to modify.
+  // Currently, actions may define to either `Set` or `Delete` headers values.
+  // Actions defined here will modify the request headers of all requests made through a route.
+  // These actions are applied to a specific Route defined within a cluster i.e. connections made through a route.
+  // Route actions will be executed after IngressController actions for request headers.
+  // Actions are applied in sequence as defined in this list.
+  // A maximum of 20 request header actions may be configured.
+  // You can use this field to specify HTTP request headers that should be set or deleted
+  // when forwarding connections from the client to your application.
+  // Sample fetchers allowed are "req.hdr" and "ssl_c_der".
+  // Converters allowed are "lower" and "base64".
+  // Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+  // Any request header configuration applied directly via a Route resource using this API
+  // will override header configuration for a header of the same name applied via
+  // spec.httpHeaders.actions on the IngressController or route annotation.
+  // Note: This field cannot be used if your route uses TLS passthrough.
+  // + ---
+  // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa.
+  // +listType=map
+  // +listMapKey=name
+  // +optional
+  // +kubebuilder:validation:MaxItems=20
+  // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64."
+  repeated RouteHTTPHeader request = 2;
+}
+
+// RouteHTTPHeaders defines policy for HTTP headers.
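+//
+// An illustrative spec fragment (the header name and value are placeholder
+// examples, not defaults) that sets one response header:
+//
+//	httpHeaders:
+//	  actions:
+//	    response:
+//	    - name: X-Frame-Options
+//	      action:
+//	        type: Set
+//	        set:
+//	          value: DENY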
+message RouteHTTPHeaders { + // actions specifies options for modifying headers and their values. + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be modified for TLS passthrough + // connections. + // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. + // `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" + // route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. + // In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after + // the actions specified in the IngressController's spec.httpHeaders.actions field. + // In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be + // executed after the actions specified in the Route's spec.httpHeaders.actions field. + // The headers set via this API will not appear in access logs. + // Any actions defined here are applied after any actions related to the following other fields: + // cache-control, spec.clientTLS, + // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, + // and spec.httpHeaders.headerNameCaseAdjustments. + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. Please refer to the documentation + // for that API field for more details. + // +optional + optional RouteHTTPHeaderActions actions = 1; +} + +// RouteIngress holds information about the places where a route is exposed. +message RouteIngress { + // host is the host string under which the route is exposed; this value is required + optional string host = 1; + + // Name is a name chosen by the router to identify itself; this value is required + optional string routerName = 2; + + // conditions is the state of the route, may be empty. + // +listType=map + // +listMapKey=type + repeated RouteIngressCondition conditions = 3; + + // Wildcard policy is the wildcard policy that was allowed where this route is exposed. + optional string wildcardPolicy = 4; + + // CanonicalHostname is the external host name for the router that can be used as a CNAME + // for the host requested for this route. This value is optional and may not be set in all cases. + optional string routerCanonicalHostname = 5; +} + +// RouteIngressCondition contains details for the current condition of this route on a particular +// router. +message RouteIngressCondition { + // type is the type of the condition. + // Currently only Admitted or UnservableInFutureVersions. + optional string type = 1; + + // status is the status of the condition. + // Can be True, False, Unknown. + optional string status = 2; + + // (brief) reason for the condition's last transition, and is usually a machine and human + // readable constant + optional string reason = 3; + + // Human readable message indicating details about last transition. 
+ optional string message = 4; + + // RFC 3339 date and time when this condition last transitioned + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; +} + +// RouteList is a collection of Routes. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RouteList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of routes + repeated Route items = 2; +} + +// RoutePort defines a port mapping from a router to an endpoint in the service endpoints. +message RoutePort { + // The target port on pods selected by the service this route points to. + // If this is a string, it will be looked up as a named port in the target + // endpoints port list. Required + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 1; +} + +// RouteSetHTTPHeader specifies what value needs to be set on an HTTP header. +message RouteSetHTTPHeader { + // value specifies a header value. + // Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in + // http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and + // otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. + // The value of this field must be no more than 16384 characters in length. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. + // + --- + // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. + // + See . + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=16384 + optional string value = 1; +} + +// RouteSpec describes the hostname or path the route exposes, any security information, +// and one to four backends (services) the route points to. Requests are distributed +// among the backends depending on the weights assigned to each backend. When using +// roundrobin scheduling the portion of requests that go to each backend is the backend +// weight divided by the sum of all of the backend weights. When the backend has more than +// one endpoint the requests that end up on the backend are roundrobin distributed among +// the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests +// to the backend. If all weights are zero the route will be considered to have no backends +// and return a standard 503 response. +// +// The `tls` field is optional and allows specific certificates or behavior for the +// route. Routers typically configure a default certificate on a wildcard domain to +// terminate routes without explicit certificates, but custom hostnames usually must +// choose passthrough (send traffic directly to the backend via the TLS Server-Name- +// Indication field) or provide a certificate. +message RouteSpec { + // host is an alias/DNS that points to the service. Optional. + // If not specified a route name will typically be automatically + // chosen. + // Must follow DNS952 subdomain conventions. 
+  //
+  // +optional
+  // +kubebuilder:validation:MaxLength=253
+  // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+  optional string host = 1;
+
+  // subdomain is a DNS subdomain that is requested within the ingress controller's
+  // domain (as a subdomain). If host is set this field is ignored. An ingress
+  // controller may choose to ignore this suggested name, in which case the controller
+  // will report the assigned name in the status.ingress array or refuse to admit the
+  // route. If this value is set and the server does not support this field host will
+  // be populated automatically. Otherwise host is left empty. The field may have
+  // multiple parts separated by a dot, but not all ingress controllers may honor
+  // the request. This field may not be changed after creation except by a user with
+  // the update routes/custom-host permission.
+  //
+  // Example: subdomain `frontend` automatically receives the router subdomain
+  // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.
+  //
+  // +optional
+  // +kubebuilder:validation:MaxLength=253
+  // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+  optional string subdomain = 8;
+
+  // path that the router watches for, to route traffic to the service. Optional
+  //
+  // +optional
+  // +kubebuilder:validation:Pattern=`^/`
+  optional string path = 2;
+
+  // to is an object the route should use as the primary backend. Only the Service kind
+  // is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100)
+  // is set to zero, no traffic will be sent to this backend.
+  optional RouteTargetReference to = 3;
+
+  // alternateBackends allows up to 3 additional backends to be assigned to the route.
+  // Only the Service kind is allowed, and it will be defaulted to Service.
+  // Use the weight field in RouteTargetReference object to specify relative preference.
+  //
+  // +kubebuilder:validation:MaxItems=3
+  // +listType=map
+  // +listMapKey=name
+  // +listMapKey=kind
+  repeated RouteTargetReference alternateBackends = 4;
+
+  // If specified, the port to be used by the router. Most routers will use all
+  // endpoints exposed by the service by default - set this value to instruct routers
+  // which port to use.
+  optional RoutePort port = 5;
+
+  // The tls field provides the ability to configure certificates and termination for the route.
+  optional TLSConfig tls = 6;
+
+  // Wildcard policy if any for the route.
+  // Currently only 'Subdomain' or 'None' is allowed.
+  //
+  // +kubebuilder:validation:Enum=None;Subdomain;""
+  // +kubebuilder:default=None
+  optional string wildcardPolicy = 7;
+
+  // httpHeaders defines policy for HTTP headers.
+  //
+  // +optional
+  optional RouteHTTPHeaders httpHeaders = 9;
+}
+
+// RouteStatus provides relevant info about the status of a route, including which routers
+// acknowledge it.
+message RouteStatus {
+  // ingress describes the places where the route may be exposed. The list of
+  // ingress points may contain duplicate Host or RouterName values. Routes
+  // are considered live once they are `Ready`
+  // +listType=atomic
+  repeated RouteIngress ingress = 1;
+}
+
+// RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service'
+// kind is allowed. Use 'weight' field to emphasize one over others.
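+//
+// An illustrative spec fragment (the service names are placeholders): weights
+// are proportional, so this sends roughly 80% of requests to app-v1 and 20%
+// to app-v2:
+//
+//	to:
+//	  kind: Service
+//	  name: app-v1
+//	  weight: 80
+//	alternateBackends:
+//	- kind: Service
+//	  name: app-v2
+//	  weight: 20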
+message RouteTargetReference { + // The kind of target that the route is referring to. Currently, only 'Service' is allowed + // + // +kubebuilder:validation:Enum=Service;"" + // +kubebuilder:default=Service + optional string kind = 1; + + // name of the service/target that is being referred to. e.g. name of the service + // + // +kubebuilder:validation:MinLength=1 + optional string name = 2; + + // weight as an integer between 0 and 256, default 100, that specifies the target's relative weight + // against other target reference objects. 0 suppresses requests to this backend. + // + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=256 + // +kubebuilder:default=100 + optional int32 weight = 3; +} + +// RouterShard has information of a routing shard and is used to +// generate host names and routing table entries when a routing shard is +// allocated for a specific route. +// Caveat: This is WIP and will likely undergo modifications when sharding +// support is added. +message RouterShard { + // shardName uniquely identifies a router shard in the "set" of + // routers used for routing traffic to the services. + optional string shardName = 1; + + // dnsSuffix for the shard ala: shard-1.v3.openshift.com + optional string dnsSuffix = 2; +} + +// TLSConfig defines config used to secure a route and provide termination +// +// +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteExternalCertificate,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" +message TLSConfig { + // termination indicates termination type. + // + // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend + // + // Note: passthrough termination is incompatible with httpHeader actions + // +kubebuilder:validation:Enum=edge;reencrypt;passthrough + optional string termination = 1; + + // certificate provides certificate contents. This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. + optional string certificate = 2; + + // key provides key file contents + optional string key = 3; + + // caCertificate provides the cert authority certificate contents + optional string caCertificate = 4; + + // destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt + // termination this file should be provided in order to have routers use it for health checks on the secure connection. + // If this field is not specified, the router may provide its own destination CA and perform hostname validation using + // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + // verify. + optional string destinationCACertificate = 5; + + // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. 
While + // each router may make its own decisions on which ports to expose, this is normally port 80. + // + // If a route does not specify insecureEdgeTerminationPolicy, then the default behavior is "None". + // + // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only). + // + // * None - no traffic is allowed on the insecure port (default). + // + // * Redirect - clients are redirected to the secure port. + // + // +kubebuilder:validation:Enum=Allow;None;Redirect;"" + optional string insecureEdgeTerminationPolicy = 6; + + // externalCertificate provides certificate contents as a secret reference. + // This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. The secret referenced should + // be present in the same namespace as that of the Route. + // Forbidden when `certificate` is set. + // + // +openshift:enable:FeatureGate=RouteExternalCertificate + // +optional + optional LocalObjectReference externalCertificate = 7; +} + diff --git a/vendor/github.com/openshift/api/route/v1/legacy.go b/vendor/github.com/openshift/api/route/v1/legacy.go new file mode 100644 index 0000000000000..498f5dd0f0604 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/legacy.go @@ -0,0 +1,22 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Route{}, + &RouteList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/route/v1/register.go b/vendor/github.com/openshift/api/route/v1/register.go new file mode 100644 index 0000000000000..6f99ef5c96ae4 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/register.go @@ -0,0 +1,39 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "route.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
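+// Consumers normally register the group through Install rather than calling
+// this function directly; a minimal sketch (the routev1 import alias is
+// illustrative):
+//
+//	scheme := runtime.NewScheme()
+//	if err := routev1.Install(scheme); err != nil {
+//		panic(err)
+//	}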
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Route{}, + &RouteList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/route/v1/test-route-validation.sh b/vendor/github.com/openshift/api/route/v1/test-route-validation.sh new file mode 100644 index 0000000000000..f1192d4a16ca7 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/test-route-validation.sh @@ -0,0 +1,476 @@ +#!/bin/bash + +# This shell script runs a series of `oc` commands to create various OpenShift +# route objects, some invalid and some valid, and verifies that the API rejects +# the invalid ones and admits the valid ones. Note that this script does not +# verify defaulting behavior and does not examine the rejection reason; it only +# checks whether the `oc create` command succeeds or fails. This script +# requires a cluster and a kubeconfig in a location where oc will find it. + +set -uo pipefail + +expect_pass() { + rc=$? + if [[ $rc != 0 ]] + then + tput setaf 1 + echo "expected success: $*, got exit code $rc" + tput sgr0 + exit 1 + fi + tput setaf 2 + echo "got expected success: $*" + tput sgr0 +} + +expect_fail() { + rc=$? + if [[ $rc = 0 ]] + then + tput setaf 1 + echo "expected failure: $*, got exit code $rc" + exit 1 + fi + tput setaf 2 + echo "got expected failure: $*" + tput sgr0 +} + +delete_route() { + oc -n openshift-ingress delete routes.route/testroute || exit 1 +} + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: / + tls: + termination: passthrough + to: + kind: Service + name: router-internal-default +EOF +expect_fail 'passthrough with nonempty path' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: / + to: + kind: Service + name: router-internal-default +EOF +expect_pass 'non-TLS with nonempty path' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: / + tls: + termination: edge + to: + kind: Service + name: router-internal-default +EOF +expect_pass 'edge-terminated with nonempty path' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: x + tls: + termination: edge + to: + kind: Service + name: router-internal-default +EOF +expect_fail 'path starting with non-slash character' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + to: + kind: Service + name: router-internal-default + wildcardPolicy: Subdomain +EOF +expect_fail 'spec.wildcardPolicy: Subdomain requires a nonempty value for spec.host' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: "" +EOF +expect_fail 'cannot have empty spec.port.targetPort' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: 0 +EOF +expect_fail 'cannot have 
numeric 0 value for spec.port.targetPort' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: "0" +EOF +expect_pass 'can have string "0" value for spec.port.targetPort' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: 1 +EOF +expect_pass 'can have numeric 1 value for spec.port.targetPort' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: x +EOF +expect_pass 'can have string "x" value for spec.port.targetPort' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + tls: + termination: passthrough + to: + kind: Nonsense + name: router-internal-default +EOF +expect_fail 'nonsense value for spec.to.kind' + + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + tls: + termination: passthrough + to: + kind: Service + name: "" +EOF +expect_fail 'spec.to.name cannot be empty' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + weight: -1 +EOF +expect_fail 'spec.to.weight cannot be negative' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + weight: 300 +EOF +expect_fail 'spec.to.weight cannot exceed 256' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + weight: 100 +EOF +expect_pass 'spec.to.weight has a valid value' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + alternateBackends: + - name: router-internal-default + - name: router-internal-default + - name: router-internal-default + - name: router-internal-default +EOF +expect_fail 'cannot have >3 values under spec.alternateBackends' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + alternateBackends: + - name: router-internal-default + - name: "" + - name: router-internal-default +EOF +expect_fail 'cannot have empty spec.alternateBackends[*].name' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + alternateBackends: + - name: router-internal-default + - name: router-internal-default + - name: router-internal-default +EOF +expect_pass 'valid spec.alternateBackends' +delete_route + +oc create -f - <<'EOF' +apiVersion: 
route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + certificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.certificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + key: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.key' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + caCertificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.caCertificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + destinationCACertificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.destinationCACertificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: edge + destinationCACertificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: edge and nonempty spec.tls.destinationCACertificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: edge + insecureEdgeTerminationPolicy: nonsense +EOF +expect_fail 'cannot have nonsense value for spec.tls.insecureEdgeTerminationPolicy' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Allow +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect +EOF +expect_pass 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: Redirect' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None +EOF +expect_pass 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: None' +delete_route diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go new file mode 100644 index 0000000000000..8fc2508773860 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ 
-0,0 +1,555 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=routes,scope=Namespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1228 +// +kubebuilder:printcolumn:name=Host,JSONPath=.status.ingress[0].host,type=string +// +kubebuilder:printcolumn:name=Admitted,JSONPath=.status.ingress[0].conditions[?(@.type=="Admitted")].status,type=string +// +kubebuilder:printcolumn:name=Service,JSONPath=.spec.to.name,type=string +// +kubebuilder:printcolumn:name=TLS,JSONPath=.spec.tls.type,type=string + +// A route allows developers to expose services through an HTTP(S) aware load balancing and proxy +// layer via a public DNS entry. The route may further specify TLS options and a certificate, or +// specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An +// administrator typically configures their router to be visible outside the cluster firewall, and +// may also add additional security, caching, or traffic controls on the service content. Routers +// usually talk directly to the service endpoints. +// +// Once a route is created, the `host` field may not be changed. Generally, routers use the oldest +// route with a given host when resolving conflicts. +// +// Routers are subject to additional customization and may support additional controls via the +// annotations field. +// +// Because administrators may configure multiple routers, the route status field is used to +// return information to clients about the names and states of the route under each router. +// If a client chooses a duplicate name, for instance, the route status conditions are used +// to indicate the route cannot be chosen. +// +// To enable HTTP/2 ALPN on a route it requires a custom +// (non-wildcard) certificate. This prevents connection coalescing by +// clients, notably web browsers. We do not support HTTP/2 ALPN on +// routes that use the default certificate because of the risk of +// connection re-use/coalescing. Routes that do not have their own +// custom certificate will not be HTTP/2 ALPN-enabled on either the +// frontend or the backend. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Route struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the desired state of the route + // +kubebuilder:validation:XValidation:rule="!has(self.tls) || self.tls.termination != 'passthrough' || !has(self.httpHeaders)",message="header actions are not permitted when tls termination is passthrough." + Spec RouteSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status is the current state of the route + // +optional + Status RouteStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RouteList is a collection of Routes. 
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type RouteList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of routes
+	Items []Route `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// RouteSpec describes the hostname or path the route exposes, any security information,
+// and one to four backends (services) the route points to. Requests are distributed
+// among the backends depending on the weights assigned to each backend. When using
+// roundrobin scheduling the portion of requests that go to each backend is the backend
+// weight divided by the sum of all of the backend weights. When the backend has more than
+// one endpoint the requests that end up on the backend are roundrobin distributed among
+// the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests
+// to the backend. If all weights are zero the route will be considered to have no backends
+// and return a standard 503 response.
+//
+// The `tls` field is optional and allows specific certificates or behavior for the
+// route. Routers typically configure a default certificate on a wildcard domain to
+// terminate routes without explicit certificates, but custom hostnames usually must
+// choose passthrough (send traffic directly to the backend via the TLS Server-Name-
+// Indication field) or provide a certificate.
+type RouteSpec struct {
+	// host is an alias/DNS that points to the service. Optional.
+	// If not specified a route name will typically be automatically
+	// chosen.
+	// Must follow DNS952 subdomain conventions.
+	//
+	// +optional
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+	// subdomain is a DNS subdomain that is requested within the ingress controller's
+	// domain (as a subdomain). If host is set this field is ignored. An ingress
+	// controller may choose to ignore this suggested name, in which case the controller
+	// will report the assigned name in the status.ingress array or refuse to admit the
+	// route. If this value is set and the server does not support this field host will
+	// be populated automatically. Otherwise host is left empty. The field may have
+	// multiple parts separated by a dot, but not all ingress controllers may honor
+	// the request. This field may not be changed after creation except by a user with
+	// the update routes/custom-host permission.
+	//
+	// Example: subdomain `frontend` automatically receives the router subdomain
+	// `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.
+	//
+	// +optional
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,8,opt,name=subdomain"`
+
+	// path that the router watches for, to route traffic to the service.
Optional + // + // +optional + // +kubebuilder:validation:Pattern=`^/` + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + + // to is an object the route should use as the primary backend. Only the Service kind + // is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) + // is set to zero, no traffic will be sent to this backend. + To RouteTargetReference `json:"to" protobuf:"bytes,3,opt,name=to"` + + // alternateBackends allows up to 3 additional backends to be assigned to the route. + // Only the Service kind is allowed, and it will be defaulted to Service. + // Use the weight field in RouteTargetReference object to specify relative preference. + // + // +kubebuilder:validation:MaxItems=3 + // +listType=map + // +listMapKey=name + // +listMapKey=kind + AlternateBackends []RouteTargetReference `json:"alternateBackends,omitempty" protobuf:"bytes,4,rep,name=alternateBackends"` + + // If specified, the port to be used by the router. Most routers will use all + // endpoints exposed by the service by default - set this value to instruct routers + // which port to use. + Port *RoutePort `json:"port,omitempty" protobuf:"bytes,5,opt,name=port"` + + // The tls field provides the ability to configure certificates and termination for the route. + TLS *TLSConfig `json:"tls,omitempty" protobuf:"bytes,6,opt,name=tls"` + + // Wildcard policy if any for the route. + // Currently only 'Subdomain' or 'None' is allowed. + // + // +kubebuilder:validation:Enum=None;Subdomain;"" + // +kubebuilder:default=None + WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,7,opt,name=wildcardPolicy"` + + // httpHeaders defines policy for HTTP headers. + // + // +optional + HTTPHeaders *RouteHTTPHeaders `json:"httpHeaders,omitempty" protobuf:"bytes,9,opt,name=httpHeaders"` +} + +// RouteHTTPHeaders defines policy for HTTP headers. +type RouteHTTPHeaders struct { + // actions specifies options for modifying headers and their values. + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be modified for TLS passthrough + // connections. + // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. + // `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" + // route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. + // In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after + // the actions specified in the IngressController's spec.httpHeaders.actions field. + // In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be + // executed after the actions specified in the Route's spec.httpHeaders.actions field. + // The headers set via this API will not appear in access logs. + // Any actions defined here are applied after any actions related to the following other fields: + // cache-control, spec.clientTLS, + // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, + // and spec.httpHeaders.headerNameCaseAdjustments. + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. 
+	// Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. Please refer to the documentation + // for that API field for more details. + // +optional + Actions RouteHTTPHeaderActions `json:"actions,omitempty" protobuf:"bytes,1,opt,name=actions"` +} + +// RouteHTTPHeaderActions defines configuration for actions on HTTP request and response headers. +type RouteHTTPHeaderActions struct { + // response is a list of HTTP response headers to modify. + // Currently, actions may either `Set` or `Delete` header values. + // Actions defined here will modify the response headers of all requests made through a route. + // These actions are applied to a specific Route defined within a cluster, i.e. connections made through a route. + // Route actions will be executed before IngressController actions for response headers. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 response header actions may be configured. + // You can use this field to specify HTTP response headers that should be set or deleted + // when forwarding responses from your application to the client. + // Sample fetchers allowed are "res.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // Note: This field cannot be used if your route uses TLS passthrough. + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + Response []RouteHTTPHeader `json:"response" protobuf:"bytes,1,rep,name=response"` + // request is a list of HTTP request headers to modify. + // Currently, actions may either `Set` or `Delete` header values. + // Actions defined here will modify the request headers of all requests made through a route. + // These actions are applied to a specific Route defined within a cluster, i.e. connections made through a route. + // Route actions will be executed after IngressController actions for request headers. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 request header actions may be configured. + // You can use this field to specify HTTP request headers that should be set or deleted + // when forwarding connections from the client to your application.
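+	// An illustrative example (hypothetical header name and value) of a request
+	// header action, expressed as Route YAML:
+	//
+	//	spec:
+	//	  httpHeaders:
+	//	    actions:
+	//	      request:
+	//	      - name: X-Environment
+	//	        action:
+	//	          type: Set
+	//	          set:
+	//	            value: production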
+ // Sample fetchers allowed are "req.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // Any request header configuration applied directly via a Route resource using this API + // will override header configuration for a header of the same name applied via + // spec.httpHeaders.actions on the IngressController or route annotation. + // Note: This field cannot be used if your route uses TLS passthrough. + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + Request []RouteHTTPHeader `json:"request" protobuf:"bytes,2,rep,name=request"` +} + +// RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. +type RouteHTTPHeader struct { + // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. + // It must be no more than 255 characters in length. + // Header name must be unique. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // action specifies actions to perform on headers, such as setting or deleting headers. + // +required + Action RouteHTTPHeaderActionUnion `json:"action" protobuf:"bytes,2,opt,name=action"` +} + +// RouteHTTPHeaderActionUnion specifies an action to take on an HTTP header. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? 
has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise" +// +union +type RouteHTTPHeaderActionUnion struct { + // type defines the type of the action to be applied on the header. + // Possible values are Set or Delete. + // Set allows you to set HTTP request and response headers. + // Delete allows you to delete HTTP request and response headers. + // +unionDiscriminator + // +kubebuilder:validation:Enum:=Set;Delete + // +required + Type RouteHTTPHeaderActionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteHTTPHeaderActionType"` + + // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. + // This field is required when type is Set and forbidden otherwise. + // +optional + // +unionMember + Set *RouteSetHTTPHeader `json:"set,omitempty" protobuf:"bytes,2,opt,name=set"` +} + +// RouteSetHTTPHeader specifies what value needs to be set on an HTTP header. +type RouteSetHTTPHeader struct { + // value specifies a header value. + // Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in + // http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and + // otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. + // The value of this field must be no more than 16384 characters in length. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. + // + --- + // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. + // + See . + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=16384 + Value string `json:"value" protobuf:"bytes,1,opt,name=value"` +} + +// RouteHTTPHeaderActionType defines actions that can be performed on HTTP headers. +type RouteHTTPHeaderActionType string + +const ( + // Set specifies that an HTTP header should be set. + Set RouteHTTPHeaderActionType = "Set" + // Delete specifies that an HTTP header should be deleted. + Delete RouteHTTPHeaderActionType = "Delete" +) + +// RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service' +// kind is allowed. Use the 'weight' field to emphasize one over others. +type RouteTargetReference struct { + // The kind of target that the route is referring to. Currently, only 'Service' is allowed. + // + // +kubebuilder:validation:Enum=Service;"" + // +kubebuilder:default=Service + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + + // name of the service/target that is being referred to. e.g. name of the service + // + // +kubebuilder:validation:MinLength=1 + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // weight as an integer between 0 and 256, default 100, that specifies the target's relative weight + // against other target reference objects. 0 suppresses requests to this backend. + // + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=256 + // +kubebuilder:default=100 + Weight *int32 `json:"weight" protobuf:"varint,3,opt,name=weight"` +} + +// RoutePort defines a port mapping from a router to an endpoint in the service endpoints. +type RoutePort struct { + // The target port on pods selected by the service this route points to.
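+	// The port may be given either by number or by the name it carries in the
+	// service's port list; illustrative (hypothetical) examples as Route YAML:
+	//
+	//	spec:
+	//	  port:
+	//	    targetPort: 8443
+	//
+	// or, selecting the port by name:
+	//
+	//	spec:
+	//	  port:
+	//	    targetPort: https
+	//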
+	// If this is a string, it will be looked up as a named port in the target + // endpoints port list. Required + TargetPort intstr.IntOrString `json:"targetPort" protobuf:"bytes,1,opt,name=targetPort"` +} + +// RouteStatus provides relevant info about the status of a route, including which routers +// acknowledge it. +type RouteStatus struct { + // ingress describes the places where the route may be exposed. The list of + // ingress points may contain duplicate Host or RouterName values. Routes + // are considered live once they are `Ready`. + // +listType=atomic + Ingress []RouteIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// RouteIngress holds information about the places where a route is exposed. +type RouteIngress struct { + // host is the host string under which the route is exposed; this value is required + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // Name is a name chosen by the router to identify itself; this value is required + RouterName string `json:"routerName,omitempty" protobuf:"bytes,2,opt,name=routerName"` + // conditions is the state of the route, may be empty. + // +listType=map + // +listMapKey=type + Conditions []RouteIngressCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` + // Wildcard policy is the wildcard policy that was allowed where this route is exposed. + WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,4,opt,name=wildcardPolicy"` + // CanonicalHostname is the external host name for the router that can be used as a CNAME + // for the host requested for this route. This value is optional and may not be set in all cases. + RouterCanonicalHostname string `json:"routerCanonicalHostname,omitempty" protobuf:"bytes,5,opt,name=routerCanonicalHostname"` +} + +// RouteIngressConditionType is a valid value for the type of a RouteIngressCondition. +type RouteIngressConditionType string + +// These are valid conditions of a route. +const ( + // RouteAdmitted means the route is able to service requests for the provided Host + RouteAdmitted RouteIngressConditionType = "Admitted" + // RouteUnservableInFutureVersions indicates that the route is using an unsupported + // configuration that may be incompatible with a future version of OpenShift. + RouteUnservableInFutureVersions RouteIngressConditionType = "UnservableInFutureVersions" +) + +// RouteIngressCondition contains details for the current condition of this route on a particular +// router. +type RouteIngressCondition struct { + // type is the type of the condition. + // Currently only Admitted or UnservableInFutureVersions. + Type RouteIngressConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteIngressConditionType"` + // status is the status of the condition. + // Can be True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // (brief) reason for the condition's last transition, and is usually a machine and human + // readable constant + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + // RFC 3339 date and time when this condition last transitioned + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,5,opt,name=lastTransitionTime"` +} + +// RouterShard has information of a routing shard and is used to +// generate host names and routing table entries when a routing shard is +// allocated for a specific route. +// Caveat: This is WIP and will likely undergo modifications when sharding +// support is added. +type RouterShard struct { + // shardName uniquely identifies a router shard in the "set" of + // routers used for routing traffic to the services. + ShardName string `json:"shardName" protobuf:"bytes,1,opt,name=shardName"` + + // dnsSuffix for the shard ala: shard-1.v3.openshift.com + DNSSuffix string `json:"dnsSuffix" protobuf:"bytes,2,opt,name=dnsSuffix"` +} + +// TLSConfig defines config used to secure a route and provide termination +// +// +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteExternalCertificate,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" +type TLSConfig struct { + // termination indicates termination type. + // + // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend + // + // Note: passthrough termination is incompatible with httpHeader actions + // +kubebuilder:validation:Enum=edge;reencrypt;passthrough + Termination TLSTerminationType `json:"termination" protobuf:"bytes,1,opt,name=termination,casttype=TLSTerminationType"` + + // certificate provides certificate contents. This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. + Certificate string `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` + + // key provides key file contents + Key string `json:"key,omitempty" protobuf:"bytes,3,opt,name=key"` + + // caCertificate provides the cert authority certificate contents + CACertificate string `json:"caCertificate,omitempty" protobuf:"bytes,4,opt,name=caCertificate"` + + // destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt + // termination this file should be provided in order to have routers use it for health checks on the secure connection. + // If this field is not specified, the router may provide its own destination CA and perform hostname validation using + // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + // verify. + DestinationCACertificate string `json:"destinationCACertificate,omitempty" protobuf:"bytes,5,opt,name=destinationCACertificate"` + + // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. 
While + // each router may make its own decisions on which ports to expose, this is normally port 80. + // + // If a route does not specify insecureEdgeTerminationPolicy, then the default behavior is "None". + // + // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only). + // + // * None - no traffic is allowed on the insecure port (default). + // + // * Redirect - clients are redirected to the secure port. + // + // +kubebuilder:validation:Enum=Allow;None;Redirect;"" + InsecureEdgeTerminationPolicy InsecureEdgeTerminationPolicyType `json:"insecureEdgeTerminationPolicy,omitempty" protobuf:"bytes,6,opt,name=insecureEdgeTerminationPolicy,casttype=InsecureEdgeTerminationPolicyType"` + + // externalCertificate provides certificate contents as a secret reference. + // This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. The secret referenced should + // be present in the same namespace as that of the Route. + // Forbidden when `certificate` is set. + // + // +openshift:enable:FeatureGate=RouteExternalCertificate + // +optional + ExternalCertificate *LocalObjectReference `json:"externalCertificate,omitempty" protobuf:"bytes,7,opt,name=externalCertificate"` +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +// +structType=atomic +type LocalObjectReference struct { + // name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// TLSTerminationType dictates where the secure communication will stop. +// TODO: Reconsider this type in v2 +type TLSTerminationType string + +// InsecureEdgeTerminationPolicyType dictates the behavior of insecure +// connections to an edge-terminated route. +type InsecureEdgeTerminationPolicyType string + +const ( + // TLSTerminationEdge terminates encryption at the edge router. + TLSTerminationEdge TLSTerminationType = "edge" + // TLSTerminationPassthrough terminates encryption at the destination; the destination is responsible for decrypting traffic + TLSTerminationPassthrough TLSTerminationType = "passthrough" + // TLSTerminationReencrypt terminates encryption at the edge router and re-encrypts it with a new certificate supplied by the destination + TLSTerminationReencrypt TLSTerminationType = "reencrypt" + + // InsecureEdgeTerminationPolicyNone disables insecure connections for an edge-terminated route. + InsecureEdgeTerminationPolicyNone InsecureEdgeTerminationPolicyType = "None" + // InsecureEdgeTerminationPolicyAllow allows insecure connections for an edge-terminated route. + InsecureEdgeTerminationPolicyAllow InsecureEdgeTerminationPolicyType = "Allow" + // InsecureEdgeTerminationPolicyRedirect redirects insecure connections for an edge-terminated route. + // As an example, for routers that support HTTP and HTTPS, the + // insecure HTTP connections will be redirected to use HTTPS. + InsecureEdgeTerminationPolicyRedirect InsecureEdgeTerminationPolicyType = "Redirect" +) + +// WildcardPolicyType indicates the type of wildcard support needed by routes. +type WildcardPolicyType string + +const ( + // WildcardPolicyNone indicates no wildcard support is needed. + WildcardPolicyNone WildcardPolicyType = "None" + + // WildcardPolicySubdomain indicates the host needs wildcard support for the subdomain.
+ // Example: For host = "www.acme.test", indicates that the router + // should support requests for *.acme.test + // Note that this will not match acme.test only *.acme.test + WildcardPolicySubdomain WildcardPolicyType = "Subdomain" +) + +// Route Annotations +const ( + // AllowNonDNSCompliantHostAnnotation indicates that the host name in a route + // configuration is not required to follow strict DNS compliance. + // Unless the annotation is set to true, the route host name must have at least one label. + // Labels must have no more than 63 characters from the set of + // alphanumeric characters, '-' or '.', and must start and end with an alphanumeric + // character. A trailing dot is not allowed. The total host name length must be no more + // than 253 characters. + // + // When the annotation is set to true, the host name must pass a smaller set of + // requirements, i.e.: character set as described above, and total host name + // length must be no more than 253 characters. + // + // NOTE: use of this annotation may validate routes that cannot be admitted and will + // not function. The annotation is provided to allow a custom scenario, e.g. a custom + // ingress controller that relies on the route API, but for some customized purpose + // needs to use routes with invalid hosts. + AllowNonDNSCompliantHostAnnotation = "route.openshift.io/allow-non-dns-compliant-host" +) + +// Ingress-to-route controller +const ( + // IngressToRouteIngressClassControllerName is the name of the + // controller that translates ingresses into routes. This value is + // intended to be used for the spec.controller field of ingressclasses. + IngressToRouteIngressClassControllerName = "openshift.io/ingress-to-route" +) diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..23a2edd423fe7 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go @@ -0,0 +1,368 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
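+// A minimal usage sketch (illustrative only; the host value is hypothetical):
+// the deep copy is fully independent, so mutating it leaves the original
+// Route untouched:
+//
+//	clone := route.DeepCopy()
+//	clone.Spec.Host = "other.example.com" // route.Spec.Host is unchanged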
+func (in *Route) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeader) DeepCopyInto(out *RouteHTTPHeader) { + *out = *in + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeader. +func (in *RouteHTTPHeader) DeepCopy() *RouteHTTPHeader { + if in == nil { + return nil + } + out := new(RouteHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeaderActionUnion) DeepCopyInto(out *RouteHTTPHeaderActionUnion) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = new(RouteSetHTTPHeader) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeaderActionUnion. +func (in *RouteHTTPHeaderActionUnion) DeepCopy() *RouteHTTPHeaderActionUnion { + if in == nil { + return nil + } + out := new(RouteHTTPHeaderActionUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeaderActions) DeepCopyInto(out *RouteHTTPHeaderActions) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]RouteHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]RouteHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeaderActions. +func (in *RouteHTTPHeaderActions) DeepCopy() *RouteHTTPHeaderActions { + if in == nil { + return nil + } + out := new(RouteHTTPHeaderActions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeaders) DeepCopyInto(out *RouteHTTPHeaders) { + *out = *in + in.Actions.DeepCopyInto(&out.Actions) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeaders. +func (in *RouteHTTPHeaders) DeepCopy() *RouteHTTPHeaders { + if in == nil { + return nil + } + out := new(RouteHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteIngress) DeepCopyInto(out *RouteIngress) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]RouteIngressCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngress. +func (in *RouteIngress) DeepCopy() *RouteIngress { + if in == nil { + return nil + } + out := new(RouteIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteIngressCondition) DeepCopyInto(out *RouteIngressCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngressCondition. +func (in *RouteIngressCondition) DeepCopy() *RouteIngressCondition { + if in == nil { + return nil + } + out := new(RouteIngressCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteList) DeepCopyInto(out *RouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Route, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. +func (in *RouteList) DeepCopy() *RouteList { + if in == nil { + return nil + } + out := new(RouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutePort) DeepCopyInto(out *RoutePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePort. +func (in *RoutePort) DeepCopy() *RoutePort { + if in == nil { + return nil + } + out := new(RoutePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSetHTTPHeader) DeepCopyInto(out *RouteSetHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSetHTTPHeader. +func (in *RouteSetHTTPHeader) DeepCopy() *RouteSetHTTPHeader { + if in == nil { + return nil + } + out := new(RouteSetHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { + *out = *in + in.To.DeepCopyInto(&out.To) + if in.AlternateBackends != nil { + in, out := &in.AlternateBackends, &out.AlternateBackends + *out = make([]RouteTargetReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(RoutePort) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = new(RouteHTTPHeaders) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. +func (in *RouteSpec) DeepCopy() *RouteSpec { + if in == nil { + return nil + } + out := new(RouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]RouteIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteTargetReference) DeepCopyInto(out *RouteTargetReference) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTargetReference. +func (in *RouteTargetReference) DeepCopy() *RouteTargetReference { + if in == nil { + return nil + } + out := new(RouteTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterShard) DeepCopyInto(out *RouterShard) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterShard. +func (in *RouterShard) DeepCopy() *RouterShard { + if in == nil { + return nil + } + out := new(RouterShard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + if in.ExternalCertificate != nil { + in, out := &in.ExternalCertificate, &out.ExternalCertificate + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. 
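+// A minimal construction sketch (illustrative, with assumed values) of an
+// edge-terminated TLSConfig that such a deep copy would clone:
+//
+//	tls := &TLSConfig{
+//		Termination:                   TLSTerminationEdge,
+//		InsecureEdgeTerminationPolicy: InsecureEdgeTerminationPolicyRedirect,
+//	}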
+func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/route/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..0277ba2f322c9 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,34 @@ +routes.route.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1228 + CRDName: routes.route.openshift.io + Capability: "" + Category: "" + FeatureGates: + - RouteExternalCertificate + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: route.openshift.io + HasStatus: true + KindName: Route + Labels: {} + PluralName: routes + PrinterColumns: + - jsonPath: .status.ingress[0].host + name: Host + type: string + - jsonPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status + name: Admitted + type: string + - jsonPath: .spec.to.name + name: Service + type: string + - jsonPath: .spec.tls.type + name: TLS + type: string + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..1d59f10335060 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,189 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalObjectReference = map[string]string{ + "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "name": "name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", +} + +func (LocalObjectReference) SwaggerDoc() map[string]string { + return map_LocalObjectReference +} + +var map_Route = map[string]string{ + "": "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints.\n\nOnce a route is created, the `host` field may not be changed. 
Generally, routers use the oldest route with a given host when resolving conflicts.\n\nRouters are subject to additional customization and may support additional controls via the annotations field.\n\nBecause administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen.\n\nTo enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the route", + "status": "status is the current state of the route", +} + +func (Route) SwaggerDoc() map[string]string { + return map_Route +} + +var map_RouteHTTPHeader = map[string]string{ + "": "RouteHTTPHeader specifies configuration for setting or deleting an HTTP header.", + "name": "name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, \"-!#$%&'*+.^_`\". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.", + "action": "action specifies actions to perform on headers, such as setting or deleting headers.", +} + +func (RouteHTTPHeader) SwaggerDoc() map[string]string { + return map_RouteHTTPHeader +} + +var map_RouteHTTPHeaderActionUnion = map[string]string{ + "": "RouteHTTPHeaderActionUnion specifies an action to take on an HTTP header.", + "type": "type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers.", + "set": "set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. This field is required when type is Set and forbidden otherwise.", +} + +func (RouteHTTPHeaderActionUnion) SwaggerDoc() map[string]string { + return map_RouteHTTPHeaderActionUnion +} + +var map_RouteHTTPHeaderActions = map[string]string{ + "": "RouteHTTPHeaderActions defines configuration for actions on HTTP request and response headers.", + "response": "response is a list of HTTP response headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. Actions are applied in sequence as defined in this list. 
A maximum of 20 response header actions may be configured. You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are \"res.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[res.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". Note: This field cannot be used if your route uses TLS passthrough. ", + "request": "request is a list of HTTP request headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the request headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Currently, actions may define to either `Set` or `Delete` headers values. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are \"req.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[req.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough. ", +} + +func (RouteHTTPHeaderActions) SwaggerDoc() map[string]string { + return map_RouteHTTPHeaderActions +} + +var map_RouteHTTPHeaders = map[string]string{ + "": "RouteHTTPHeaders defines policy for HTTP headers.", + "actions": "actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the \"haproxy.router.openshift.io/hsts_header\" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. 
Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.", +} + +func (RouteHTTPHeaders) SwaggerDoc() map[string]string { + return map_RouteHTTPHeaders +} + +var map_RouteIngress = map[string]string{ + "": "RouteIngress holds information about the places where a route is exposed.", + "host": "host is the host string under which the route is exposed; this value is required", + "routerName": "Name is a name chosen by the router to identify itself; this value is required", + "conditions": "conditions is the state of the route, may be empty.", + "wildcardPolicy": "Wildcard policy is the wildcard policy that was allowed where this route is exposed.", + "routerCanonicalHostname": "CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases.", +} + +func (RouteIngress) SwaggerDoc() map[string]string { + return map_RouteIngress +} + +var map_RouteIngressCondition = map[string]string{ + "": "RouteIngressCondition contains details for the current condition of this route on a particular router.", + "type": "type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", + "status": "status is the status of the condition. Can be True, False, Unknown.", + "reason": "(brief) reason for the condition's last transition, and is usually a machine and human readable constant", + "message": "Human readable message indicating details about last transition.", + "lastTransitionTime": "RFC 3339 date and time when this condition last transitioned", +} + +func (RouteIngressCondition) SwaggerDoc() map[string]string { + return map_RouteIngressCondition +} + +var map_RouteList = map[string]string{ + "": "RouteList is a collection of Routes.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of routes", +} + +func (RouteList) SwaggerDoc() map[string]string { + return map_RouteList +} + +var map_RoutePort = map[string]string{ + "": "RoutePort defines a port mapping from a router to an endpoint in the service endpoints.", + "targetPort": "The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required", +} + +func (RoutePort) SwaggerDoc() map[string]string { + return map_RoutePort +} + +var map_RouteSetHTTPHeader = map[string]string{ + "": "RouteSetHTTPHeader specifies what value needs to be set on an HTTP header.", + "value": "value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. 
Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. ", +} + +func (RouteSetHTTPHeader) SwaggerDoc() map[string]string { + return map_RouteSetHTTPHeader +} + +var map_RouteSpec = map[string]string{ + "": "RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response.\n\nThe `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate.", + "host": "host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions.", + "subdomain": "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", + "path": "path that the router watches for, to route traffic for to the service. Optional", + "to": "to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend.", + "alternateBackends": "alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference.", + "port": "If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.", + "tls": "The tls field provides the ability to configure certificates and termination for the route.", + "wildcardPolicy": "Wildcard policy if any for the route. 
Currently only 'Subdomain' or 'None' is allowed.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.", +} + +func (RouteSpec) SwaggerDoc() map[string]string { + return map_RouteSpec +} + +var map_RouteStatus = map[string]string{ + "": "RouteStatus provides relevant info about the status of a route, including which routers acknowledge it.", + "ingress": "ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready`", +} + +func (RouteStatus) SwaggerDoc() map[string]string { + return map_RouteStatus +} + +var map_RouteTargetReference = map[string]string{ + "": "RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others.", + "kind": "The kind of target that the route is referring to. Currently, only 'Service' is allowed", + "name": "name of the service/target that is being referred to. e.g. name of the service", + "weight": "weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend.", +} + +func (RouteTargetReference) SwaggerDoc() map[string]string { + return map_RouteTargetReference +} + +var map_RouterShard = map[string]string{ + "": "RouterShard has information of a routing shard and is used to generate host names and routing table entries when a routing shard is allocated for a specific route. Caveat: This is WIP and will likely undergo modifications when sharding support is added.", + "shardName": "shardName uniquely identifies a router shard in the \"set\" of routers used for routing traffic to the services.", + "dnsSuffix": "dnsSuffix for the shard ala: shard-1.v3.openshift.com", +} + +func (RouterShard) SwaggerDoc() map[string]string { + return map_RouterShard +} + +var map_TLSConfig = map[string]string{ + "": "TLSConfig defines config used to secure a route and provide termination", + "termination": "termination indicates termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend\n\nNote: passthrough termination is incompatible with httpHeader actions", + "certificate": "certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate.", + "key": "key provides key file contents", + "caCertificate": "caCertificate provides the cert authority certificate contents", + "destinationCACertificate": "destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify.", + "insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. 
While each router may make its own decisions on which ports to expose, this is normally port 80.\n\nIf a route does not specify insecureEdgeTerminationPolicy, then the default behavior is \"None\".\n\n* Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only).\n\n* None - no traffic is allowed on the insecure port (default).\n\n* Redirect - clients are redirected to the secure port.", + "externalCertificate": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set.", +} + +func (TLSConfig) SwaggerDoc() map[string]string { + return map_TLSConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/security/install.go b/vendor/github.com/openshift/api/security/install.go new file mode 100644 index 0000000000000..c2b04c43298ff --- /dev/null +++ b/vendor/github.com/openshift/api/security/install.go @@ -0,0 +1,26 @@ +package security + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + securityv1 "github.com/openshift/api/security/v1" +) + +const ( + GroupName = "security.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(securityv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/security/v1/Makefile b/vendor/github.com/openshift/api/security/v1/Makefile new file mode 100644 index 0000000000000..096e6fa2c9c86 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="security.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/security/v1/consts.go b/vendor/github.com/openshift/api/security/v1/consts.go new file mode 100644 index 0000000000000..7e8adf6e64241 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/consts.go @@ -0,0 +1,16 @@ +package v1 + +const ( + UIDRangeAnnotation = "openshift.io/sa.scc.uid-range" + // SupplementalGroupsAnnotation contains a comma delimited list of allocated supplemental groups + // for the namespace. Groups are in the form of a Block which supports {start}/{length} or {start}-{end} + SupplementalGroupsAnnotation = "openshift.io/sa.scc.supplemental-groups" + MCSAnnotation = "openshift.io/sa.scc.mcs" + ValidatedSCCAnnotation = "openshift.io/scc" + // This annotation pins required SCCs for core OpenShift workloads to prevent preemption of custom SCCs. + // It is being used in the SCC admission plugin. + RequiredSCCAnnotation = "openshift.io/required-scc" + + // MinimallySufficientPodSecurityStandard indicates the PodSecurityStandard that matched the SCCs available to the users of the namespace. 
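+	// A hypothetical lookup (illustrative; assumes ns is a corev1.Namespace):
+	//
+	//	std := ns.Annotations[MinimallySufficientPodSecurityStandard]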
+ MinimallySufficientPodSecurityStandard = "security.openshift.io/MinimallySufficientPodSecurityStandard" +) diff --git a/vendor/github.com/openshift/api/security/v1/doc.go b/vendor/github.com/openshift/api/security/v1/doc.go new file mode 100644 index 0000000000000..44fe37eb2dcd9 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/security/apis/security +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=security.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/security/v1/generated.pb.go b/vendor/github.com/openshift/api/security/v1/generated.pb.go new file mode 100644 index 0000000000000..e28b5958412c2 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/generated.pb.go @@ -0,0 +1,5466 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/security/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{0} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{1} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions 
proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{2} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *PodSecurityPolicyReview) Reset() { *m = PodSecurityPolicyReview{} } +func (*PodSecurityPolicyReview) ProtoMessage() {} +func (*PodSecurityPolicyReview) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{3} +} +func (m *PodSecurityPolicyReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyReview.Merge(m, src) +} +func (m *PodSecurityPolicyReview) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyReview) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyReview.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyReview proto.InternalMessageInfo + +func (m *PodSecurityPolicyReviewSpec) Reset() { *m = PodSecurityPolicyReviewSpec{} } +func (*PodSecurityPolicyReviewSpec) ProtoMessage() {} +func (*PodSecurityPolicyReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{4} +} +func (m *PodSecurityPolicyReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyReviewSpec.Merge(m, src) +} +func (m *PodSecurityPolicyReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyReviewSpec proto.InternalMessageInfo + +func (m *PodSecurityPolicyReviewStatus) Reset() { *m = PodSecurityPolicyReviewStatus{} } +func (*PodSecurityPolicyReviewStatus) ProtoMessage() {} +func (*PodSecurityPolicyReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{5} +} +func (m *PodSecurityPolicyReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyReviewStatus.Merge(m, src) +} +func (m *PodSecurityPolicyReviewStatus) XXX_Size() int { + return m.Size() +} +func (m 
*PodSecurityPolicyReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyReviewStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicySelfSubjectReview) Reset() { *m = PodSecurityPolicySelfSubjectReview{} } +func (*PodSecurityPolicySelfSubjectReview) ProtoMessage() {} +func (*PodSecurityPolicySelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{6} +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySelfSubjectReview.Merge(m, src) +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySelfSubjectReview proto.InternalMessageInfo + +func (m *PodSecurityPolicySelfSubjectReviewSpec) Reset() { + *m = PodSecurityPolicySelfSubjectReviewSpec{} +} +func (*PodSecurityPolicySelfSubjectReviewSpec) ProtoMessage() {} +func (*PodSecurityPolicySelfSubjectReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{7} +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec.Merge(m, src) +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec proto.InternalMessageInfo + +func (m *PodSecurityPolicySubjectReview) Reset() { *m = PodSecurityPolicySubjectReview{} } +func (*PodSecurityPolicySubjectReview) ProtoMessage() {} +func (*PodSecurityPolicySubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{8} +} +func (m *PodSecurityPolicySubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySubjectReview.Merge(m, src) +} +func (m *PodSecurityPolicySubjectReview) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySubjectReview proto.InternalMessageInfo + +func (m *PodSecurityPolicySubjectReviewSpec) Reset() { *m = PodSecurityPolicySubjectReviewSpec{} } +func 
(*PodSecurityPolicySubjectReviewSpec) ProtoMessage() {} +func (*PodSecurityPolicySubjectReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{9} +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySubjectReviewSpec.Merge(m, src) +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySubjectReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySubjectReviewSpec proto.InternalMessageInfo + +func (m *PodSecurityPolicySubjectReviewStatus) Reset() { *m = PodSecurityPolicySubjectReviewStatus{} } +func (*PodSecurityPolicySubjectReviewStatus) ProtoMessage() {} +func (*PodSecurityPolicySubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{10} +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySubjectReviewStatus.Merge(m, src) +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySubjectReviewStatus proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{11} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *RangeAllocationList) Reset() { *m = RangeAllocationList{} } +func (*RangeAllocationList) ProtoMessage() {} +func (*RangeAllocationList) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{12} +} +func (m *RangeAllocationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RangeAllocationList) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_RangeAllocationList.Merge(m, src) +} +func (m *RangeAllocationList) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocationList) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocationList.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeAllocationList proto.InternalMessageInfo + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{13} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxContextStrategyOptions) Reset() { *m = SELinuxContextStrategyOptions{} } +func (*SELinuxContextStrategyOptions) ProtoMessage() {} +func (*SELinuxContextStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{14} +} +func (m *SELinuxContextStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxContextStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxContextStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxContextStrategyOptions.Merge(m, src) +} +func (m *SELinuxContextStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxContextStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxContextStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxContextStrategyOptions proto.InternalMessageInfo + +func (m *SecurityContextConstraints) Reset() { *m = SecurityContextConstraints{} } +func (*SecurityContextConstraints) ProtoMessage() {} +func (*SecurityContextConstraints) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{15} +} +func (m *SecurityContextConstraints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContextConstraints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecurityContextConstraints) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContextConstraints.Merge(m, src) +} +func (m *SecurityContextConstraints) XXX_Size() int { + return m.Size() +} +func (m *SecurityContextConstraints) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContextConstraints.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityContextConstraints proto.InternalMessageInfo + +func (m *SecurityContextConstraintsList) Reset() { *m = SecurityContextConstraintsList{} } +func (*SecurityContextConstraintsList) ProtoMessage() {} +func (*SecurityContextConstraintsList) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{16} +} +func 
(m *SecurityContextConstraintsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContextConstraintsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecurityContextConstraintsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContextConstraintsList.Merge(m, src) +} +func (m *SecurityContextConstraintsList) XXX_Size() int { + return m.Size() +} +func (m *SecurityContextConstraintsList) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContextConstraintsList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityContextConstraintsList proto.InternalMessageInfo + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Reset() { + *m = ServiceAccountPodSecurityPolicyReviewStatus{} +} +func (*ServiceAccountPodSecurityPolicyReviewStatus) ProtoMessage() {} +func (*ServiceAccountPodSecurityPolicyReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{17} +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus.Merge(m, src) +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus proto.InternalMessageInfo + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllowedFlexVolume)(nil), "github.com.openshift.api.security.v1.AllowedFlexVolume") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "github.com.openshift.api.security.v1.FSGroupStrategyOptions") + proto.RegisterType((*IDRange)(nil), "github.com.openshift.api.security.v1.IDRange") + proto.RegisterType((*PodSecurityPolicyReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReview") + proto.RegisterType((*PodSecurityPolicyReviewSpec)(nil), 
"github.com.openshift.api.security.v1.PodSecurityPolicyReviewSpec") + proto.RegisterType((*PodSecurityPolicyReviewStatus)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReviewStatus") + proto.RegisterType((*PodSecurityPolicySelfSubjectReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySelfSubjectReview") + proto.RegisterType((*PodSecurityPolicySelfSubjectReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySelfSubjectReviewSpec") + proto.RegisterType((*PodSecurityPolicySubjectReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReview") + proto.RegisterType((*PodSecurityPolicySubjectReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReviewSpec") + proto.RegisterType((*PodSecurityPolicySubjectReviewStatus)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReviewStatus") + proto.RegisterType((*RangeAllocation)(nil), "github.com.openshift.api.security.v1.RangeAllocation") + proto.RegisterType((*RangeAllocationList)(nil), "github.com.openshift.api.security.v1.RangeAllocationList") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "github.com.openshift.api.security.v1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxContextStrategyOptions)(nil), "github.com.openshift.api.security.v1.SELinuxContextStrategyOptions") + proto.RegisterType((*SecurityContextConstraints)(nil), "github.com.openshift.api.security.v1.SecurityContextConstraints") + proto.RegisterType((*SecurityContextConstraintsList)(nil), "github.com.openshift.api.security.v1.SecurityContextConstraintsList") + proto.RegisterType((*ServiceAccountPodSecurityPolicyReviewStatus)(nil), "github.com.openshift.api.security.v1.ServiceAccountPodSecurityPolicyReviewStatus") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "github.com.openshift.api.security.v1.SupplementalGroupsStrategyOptions") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/security/v1/generated.proto", fileDescriptor_af65d9655aa67551) +} + +var fileDescriptor_af65d9655aa67551 = []byte{ + // 1803 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x8a, 0xfa, 0xe2, 0x48, 0x96, 0xe4, 0x91, 0x2c, 0x4f, 0xd4, 0x98, 0x54, 0xd7, 0x6e, + 0x60, 0xb4, 0xcd, 0x32, 0x36, 0xd2, 0xc6, 0x45, 0x1a, 0x23, 0x5c, 0x31, 0x72, 0x14, 0xc8, 0x31, + 0x33, 0x8c, 0x82, 0x22, 0x08, 0x8a, 0x8c, 0x96, 0x43, 0x7a, 0xac, 0xe5, 0xee, 0x76, 0x67, 0x56, + 0x16, 0xd1, 0x4b, 0x81, 0xfe, 0x03, 0x05, 0x7a, 0xef, 0xb9, 0xfd, 0x07, 0x7a, 0x29, 0xda, 0x5e, + 0x0d, 0xb4, 0x45, 0x73, 0x2a, 0x72, 0x22, 0x6a, 0x16, 0xbd, 0xf4, 0xda, 0x9b, 0x0f, 0x45, 0x31, + 0xc3, 0xe1, 0xc7, 0x2e, 0x77, 0xe9, 0x4d, 0x6a, 0x0b, 0xbd, 0x69, 0xdf, 0xc7, 0xef, 0xfd, 0xde, + 0xcc, 0xbc, 0x37, 0x6f, 0x28, 0xf0, 0x66, 0x9b, 0x89, 0x87, 0xd1, 0x89, 0xe5, 0xf8, 0x9d, 0x8a, + 0x1f, 0x50, 0x8f, 0x3f, 0x64, 0x2d, 0x51, 0x21, 0x01, 0xab, 0x70, 0xea, 0x44, 0x21, 0x13, 0xdd, + 0xca, 0xd9, 0xad, 0x4a, 0x9b, 0x7a, 0x34, 0x24, 0x82, 0x36, 0xad, 0x20, 0xf4, 0x85, 0x0f, 0x6f, + 0x8c, 0xbd, 0xac, 0x91, 0x97, 0x45, 0x02, 0x66, 0x0d, 0xbd, 0xac, 0xb3, 0x5b, 0xbb, 0xaf, 0x4f, + 0x60, 0xb7, 0xfd, 0xb6, 0x5f, 0x51, 0xce, 0x27, 0x51, 0x4b, 0x7d, 0xa9, 0x0f, 0xf5, 0xd7, 0x00, + 0x74, 0xd7, 0x3c, 0xbd, 0xc3, 0x2d, 0xe6, 0xab, 0xe0, 0x8e, 0x1f, 0xd2, 0x94, 0xc0, 0xbb, 0x6f, + 0x8e, 0x6d, 0x3a, 0xc4, 0x79, 0xc8, 0x3c, 0x1a, 0x76, 0x2b, 0xc1, 0x69, 0x5b, 0x0a, 0x78, 0xa5, + 0x43, 0x05, 0x49, 0xf3, 0xfa, 
0x7e, 0x96, 0x57, 0x18, 0x79, 0x82, 0x75, 0x68, 0x85, 0x3b, 0x0f, + 0x69, 0x87, 0x24, 0xfd, 0xcc, 0xb7, 0xc1, 0xe5, 0xaa, 0xeb, 0xfa, 0x8f, 0x69, 0xf3, 0xc0, 0xa5, + 0xe7, 0x9f, 0xf8, 0x6e, 0xd4, 0xa1, 0xf0, 0x35, 0xb0, 0xd4, 0x0c, 0xd9, 0x19, 0x0d, 0x91, 0xb1, + 0x67, 0xdc, 0x2c, 0xda, 0xeb, 0x4f, 0x7a, 0xe5, 0xb9, 0x7e, 0xaf, 0xbc, 0x54, 0x53, 0x52, 0xac, + 0xb5, 0xe6, 0xaf, 0x0d, 0xb0, 0x73, 0xd0, 0xb8, 0x17, 0xfa, 0x51, 0xd0, 0x10, 0x12, 0xb5, 0xdd, + 0x7d, 0x10, 0x08, 0xe6, 0x7b, 0x1c, 0xbe, 0x05, 0x16, 0x44, 0x37, 0xa0, 0x1a, 0xe0, 0xba, 0x06, + 0x58, 0xf8, 0xb8, 0x1b, 0xd0, 0x67, 0xbd, 0xf2, 0x56, 0xc2, 0x4b, 0x8a, 0xb1, 0x72, 0x80, 0xc7, + 0x60, 0x29, 0x24, 0x5e, 0x9b, 0x72, 0x34, 0xbf, 0x57, 0xb8, 0xb9, 0x7a, 0xfb, 0x75, 0x2b, 0xcf, + 0x46, 0x58, 0x87, 0x35, 0x2c, 0xbd, 0xc6, 0x54, 0xd5, 0x27, 0xc7, 0x1a, 0xcc, 0xbc, 0x07, 0x96, + 0xb5, 0x09, 0xbc, 0x06, 0x0a, 0x1d, 0xe6, 0x29, 0x66, 0x05, 0x7b, 0x55, 0xdb, 0x17, 0xee, 0x33, + 0x0f, 0x4b, 0xb9, 0x52, 0x93, 0x73, 0x34, 0x9f, 0x50, 0x93, 0x73, 0x2c, 0xe5, 0xe6, 0x5f, 0xe6, + 0xc1, 0xd5, 0xba, 0xdf, 0x6c, 0xe8, 0xd8, 0x75, 0xdf, 0x65, 0x4e, 0x17, 0xd3, 0x33, 0x46, 0x1f, + 0xc3, 0xcf, 0xc1, 0x8a, 0xdc, 0x9f, 0x26, 0x11, 0x04, 0x15, 0xf6, 0x8c, 0x9b, 0xab, 0xb7, 0xdf, + 0xb0, 0x06, 0xfb, 0x62, 0x4d, 0xee, 0x8b, 0x15, 0x9c, 0xb6, 0xa5, 0x80, 0x5b, 0xd2, 0x5a, 0xb2, + 0x7f, 0x70, 0xf2, 0x88, 0x3a, 0xe2, 0x3e, 0x15, 0xc4, 0x86, 0x3a, 0x22, 0x18, 0xcb, 0xf0, 0x08, + 0x15, 0x3a, 0x60, 0x81, 0x07, 0xd4, 0x51, 0xe4, 0x57, 0x6f, 0x57, 0xf3, 0xad, 0x4d, 0x06, 0xdd, + 0x46, 0x40, 0x1d, 0x7b, 0x6d, 0xb8, 0x33, 0xf2, 0x0b, 0x2b, 0x70, 0x78, 0x0a, 0x96, 0xb8, 0x20, + 0x22, 0xe2, 0x6a, 0x11, 0x56, 0x6f, 0xef, 0xff, 0x6f, 0x61, 0x14, 0xd4, 0x78, 0x63, 0x06, 0xdf, + 0x58, 0x87, 0x30, 0x7f, 0x67, 0x80, 0x6f, 0xcc, 0x20, 0x08, 0x3f, 0x02, 0x2b, 0x82, 0x76, 0x02, + 0x97, 0x08, 0xaa, 0xb3, 0xbe, 0x3e, 0xb1, 0xa6, 0x96, 0xac, 0x22, 0x1d, 0xfc, 0x63, 0x6d, 0xa6, + 0xf2, 0xda, 0xd4, 0xe1, 0x56, 0x86, 0x52, 0x3c, 0x82, 0x81, 0x87, 0x60, 0x8b, 0xd3, 0xf0, 0x8c, + 0x39, 0xb4, 0xea, 0x38, 0x7e, 0xe4, 0x89, 0x0f, 0x49, 0x47, 0x9f, 0xb7, 0xa2, 0x7d, 0xb5, 0xdf, + 0x2b, 0x6f, 0x35, 0xa6, 0xd5, 0x38, 0xcd, 0xc7, 0xfc, 0x93, 0x01, 0xae, 0xcd, 0xcc, 0x1b, 0xfe, + 0xc6, 0x00, 0x3b, 0x64, 0x50, 0x61, 0x71, 0x54, 0x8e, 0x0c, 0x75, 0xc0, 0x3f, 0xca, 0xb7, 0xba, + 0x71, 0xe7, 0xd9, 0x6b, 0x5d, 0xd2, 0xc9, 0xef, 0x54, 0x53, 0x03, 0xe3, 0x0c, 0x42, 0xe6, 0xbf, + 0xe6, 0x81, 0x39, 0x85, 0xdc, 0xa0, 0x6e, 0xab, 0x11, 0xa9, 0xc3, 0x78, 0x61, 0xc7, 0xdc, 0x8b, + 0x1d, 0xf3, 0xa3, 0xaf, 0x79, 0xfe, 0xa6, 0x98, 0x67, 0x9e, 0xf8, 0x30, 0x71, 0xe2, 0x3f, 0xf8, + 0xba, 0x11, 0x63, 0xd1, 0x66, 0x1f, 0xfc, 0x9f, 0x82, 0xd7, 0xf2, 0x31, 0x7e, 0x09, 0x25, 0x60, + 0xf6, 0xe7, 0x41, 0x69, 0x36, 0xfb, 0x0b, 0xd8, 0xe5, 0x47, 0xb1, 0x5d, 0x7e, 0xff, 0x85, 0xac, + 0xf9, 0xff, 0xd3, 0x0e, 0xff, 0xde, 0x48, 0x2b, 0xa7, 0x0b, 0xd8, 0x5e, 0xb8, 0x07, 0x16, 0x22, + 0x4e, 0x43, 0x95, 0x6b, 0x71, 0xbc, 0x1e, 0xc7, 0x9c, 0x86, 0x58, 0x69, 0xa0, 0x09, 0x96, 0xda, + 0xf2, 0x06, 0xe6, 0xa8, 0xa0, 0xda, 0x1e, 0x90, 0xfc, 0xd5, 0x9d, 0xcc, 0xb1, 0xd6, 0x98, 0xff, + 0x36, 0xc0, 0x8d, 0x3c, 0x0b, 0x00, 0xeb, 0xa0, 0xa8, 0x3b, 0x8a, 0xdd, 0x9d, 0x95, 0xc2, 0x03, + 0xed, 0xda, 0xa2, 0x21, 0xf5, 0x1c, 0x6a, 0x5f, 0xea, 0xf7, 0xca, 0xc5, 0xea, 0xd0, 0x13, 0x8f, + 0x41, 0xe4, 0x04, 0x12, 0x52, 0xc2, 0x7d, 0x4f, 0xa7, 0x30, 0xbe, 0xd6, 0x95, 0x14, 0x6b, 0x6d, + 0x6c, 0xed, 0x0a, 0x2f, 0xa6, 0x34, 0x7e, 0x6b, 0x80, 0x0d, 0x35, 0x28, 0x48, 0x62, 0x0e, 0x91, + 0xe3, 0x4c, 0xac, 0x16, 0x8c, 0x97, 0x52, 0x0b, 0xd7, 
0xc1, 0xa2, 0x9a, 0x54, 0x74, 0xbe, 0x97, + 0xb4, 0xf1, 0xa2, 0x62, 0x82, 0x07, 0x3a, 0xf8, 0x2a, 0x58, 0x18, 0x95, 0xe3, 0x9a, 0xbd, 0x22, + 0xb7, 0xb4, 0x46, 0x04, 0xc1, 0x4a, 0x6a, 0xfe, 0xd5, 0x00, 0x5b, 0x09, 0xe2, 0x47, 0x8c, 0x0b, + 0xf8, 0xd9, 0x14, 0x79, 0x2b, 0x1f, 0x79, 0xe9, 0xad, 0xa8, 0x8f, 0x96, 0x6b, 0x28, 0x99, 0x20, + 0xfe, 0x29, 0x58, 0x64, 0x82, 0x76, 0x86, 0xe3, 0xda, 0xf7, 0xf2, 0xd5, 0x55, 0x82, 0xe7, 0x38, + 0xdf, 0x43, 0x89, 0x85, 0x07, 0x90, 0xe6, 0xdf, 0x0c, 0x80, 0x70, 0xe4, 0x55, 0xb9, 0x3c, 0xb8, + 0xc9, 0x09, 0xf3, 0x07, 0xb1, 0x09, 0xf3, 0x5b, 0x89, 0x09, 0xf3, 0xca, 0x94, 0xdf, 0xc4, 0x8c, + 0xf9, 0x0a, 0x28, 0x44, 0xac, 0xa9, 0x47, 0xbc, 0x65, 0x39, 0xde, 0x1d, 0x1f, 0xd6, 0xb0, 0x94, + 0xc1, 0x5b, 0x60, 0x35, 0x62, 0x4d, 0x45, 0xef, 0x3e, 0xf3, 0xd4, 0x4a, 0x17, 0xec, 0x8d, 0x7e, + 0xaf, 0xbc, 0x7a, 0xac, 0xe7, 0x47, 0x39, 0x28, 0x4e, 0xda, 0xc4, 0x5c, 0xc8, 0x39, 0x5a, 0x48, + 0x71, 0x21, 0xe7, 0x78, 0xd2, 0xc6, 0xfc, 0xa3, 0x01, 0xae, 0x35, 0xde, 0x3b, 0x62, 0x5e, 0x74, + 0xbe, 0xef, 0x7b, 0x82, 0x9e, 0x8b, 0x64, 0x76, 0x77, 0x63, 0xd9, 0x7d, 0x3b, 0x91, 0xdd, 0x6e, + 0xba, 0xf3, 0x44, 0x8a, 0x3f, 0x06, 0xeb, 0x9c, 0x2a, 0x1b, 0x8d, 0xa8, 0xfb, 0x9e, 0x99, 0x56, + 0x1e, 0x1a, 0x4d, 0x5b, 0xda, 0xb0, 0xdf, 0x2b, 0xaf, 0xc7, 0x65, 0x38, 0x81, 0x66, 0xfe, 0xe7, + 0x32, 0xd8, 0x1d, 0x36, 0x06, 0xcd, 0x62, 0xdf, 0xf7, 0xb8, 0x08, 0x09, 0xf3, 0x04, 0xbf, 0x80, + 0x82, 0xb9, 0x09, 0x56, 0x82, 0x90, 0xf9, 0x32, 0xbe, 0x4a, 0x6d, 0xd1, 0x5e, 0x93, 0x27, 0xb4, + 0xae, 0x65, 0x78, 0xa4, 0x85, 0x9f, 0x01, 0xa4, 0x1a, 0x4b, 0x3d, 0x64, 0x67, 0xcc, 0xa5, 0x6d, + 0xda, 0x94, 0x84, 0x89, 0x24, 0xa0, 0xf6, 0x77, 0xc5, 0xde, 0xd3, 0x91, 0x50, 0x35, 0xc3, 0x0e, + 0x67, 0x22, 0x40, 0x0e, 0x76, 0x9a, 0xb4, 0x45, 0x22, 0x57, 0x54, 0x9b, 0xcd, 0x7d, 0x12, 0x90, + 0x13, 0xe6, 0x32, 0xc1, 0x28, 0x47, 0x0b, 0xaa, 0xb1, 0xbe, 0x2d, 0xe7, 0xb0, 0x5a, 0xaa, 0xc5, + 0xb3, 0x5e, 0xf9, 0xda, 0xf4, 0x83, 0xd0, 0x1a, 0x99, 0x74, 0x71, 0x06, 0x34, 0xec, 0x02, 0x14, + 0xd2, 0x9f, 0x44, 0x2c, 0xa4, 0xcd, 0x5a, 0xe8, 0x07, 0xb1, 0xb0, 0x8b, 0x2a, 0xec, 0x3b, 0x32, + 0x1d, 0x9c, 0x61, 0xf3, 0xfc, 0xc0, 0x99, 0xf0, 0xf0, 0x11, 0xd8, 0xd2, 0x6d, 0x3a, 0x16, 0x75, + 0x49, 0x45, 0xbd, 0x23, 0x87, 0xe7, 0xea, 0xb4, 0xfa, 0xf9, 0x01, 0xd3, 0x40, 0x47, 0x3b, 0xf7, + 0xbe, 0xcf, 0x45, 0x8d, 0x85, 0x83, 0xd7, 0x69, 0xdd, 0x8d, 0xda, 0xcc, 0x43, 0xcb, 0x29, 0x3b, + 0x97, 0x62, 0x87, 0x33, 0x11, 0x60, 0x05, 0x2c, 0x9f, 0xa9, 0x6f, 0x8e, 0x56, 0x14, 0xfb, 0x2b, + 0xfd, 0x5e, 0x79, 0x79, 0x60, 0x22, 0x19, 0x2f, 0x1d, 0x34, 0x54, 0x41, 0x0d, 0xad, 0xe0, 0xcf, + 0x0d, 0x00, 0x49, 0xf2, 0xb1, 0xcc, 0xd1, 0x15, 0xd5, 0xf8, 0xde, 0xca, 0xd7, 0xf8, 0xa6, 0x1e, + 0xdb, 0xf6, 0xae, 0x4e, 0x01, 0x4e, 0xa9, 0x38, 0x4e, 0x09, 0x07, 0x6b, 0x60, 0x73, 0x94, 0xd2, + 0x87, 0x54, 0x3c, 0xf6, 0xc3, 0x53, 0x54, 0x54, 0x8b, 0x81, 0x34, 0xd2, 0x66, 0x35, 0xa1, 0xc7, + 0x53, 0x1e, 0xf0, 0x2e, 0x58, 0x1f, 0xc9, 0xea, 0x7e, 0x28, 0x38, 0x02, 0x0a, 0x63, 0x47, 0x63, + 0xac, 0x57, 0x63, 0x5a, 0x9c, 0xb0, 0x86, 0x77, 0xc0, 0xda, 0x58, 0x72, 0x58, 0x43, 0xab, 0xca, + 0x7b, 0x5b, 0x7b, 0xaf, 0x55, 0x27, 0x74, 0x38, 0x66, 0x19, 0xf3, 0x3c, 0xac, 0xef, 0xa3, 0xb5, + 0x0c, 0xcf, 0xc3, 0xfa, 0x3e, 0x8e, 0x59, 0xc2, 0xcf, 0x01, 0x94, 0xb3, 0x8b, 0x7a, 0x79, 0x05, + 0xc4, 0xa1, 0x47, 0xf4, 0x8c, 0xba, 0x68, 0x57, 0x75, 0xc8, 0x37, 0x86, 0xab, 0x78, 0x3c, 0x65, + 0xf1, 0xac, 0x57, 0x86, 0x71, 0x89, 0xda, 0xd6, 0x14, 0x2c, 0xd8, 0x01, 0xe5, 0x61, 0xc5, 0xc5, + 0xea, 0xfd, 0x3d, 0xee, 0x10, 0x57, 0xdd, 0x54, 0x68, 0x47, 0xd1, 0xbd, 0xde, 
0xef, 0x95, 0xcb, + 0xb5, 0xd9, 0xa6, 0xf8, 0x79, 0x58, 0xf0, 0x47, 0xc9, 0xce, 0x34, 0x11, 0xe7, 0xaa, 0x8a, 0xf3, + 0xea, 0x74, 0x57, 0x9a, 0x08, 0x90, 0xe9, 0x2d, 0x8f, 0xea, 0xb0, 0x63, 0xeb, 0xee, 0x8c, 0x2e, + 0x7d, 0x95, 0xb7, 0xfc, 0xcc, 0xcb, 0x69, 0x7c, 0x48, 0xe2, 0x66, 0x38, 0x11, 0x12, 0xfa, 0xa0, + 0x18, 0x0e, 0xaf, 0x61, 0xb4, 0xae, 0xe2, 0xdf, 0xcd, 0x39, 0x1f, 0x64, 0xdc, 0xfa, 0xf6, 0x65, + 0x1d, 0xba, 0x38, 0xb2, 0xc0, 0xe3, 0x18, 0xf0, 0x97, 0x06, 0x80, 0x3c, 0x0a, 0x02, 0x97, 0x76, + 0xa8, 0x27, 0x88, 0x3b, 0x18, 0x68, 0xd1, 0x86, 0x0a, 0x7d, 0x2f, 0x67, 0xea, 0x53, 0xfe, 0x49, + 0x0e, 0xa3, 0x8a, 0x9d, 0x36, 0xc5, 0x29, 0xe1, 0x61, 0x1b, 0x2c, 0xb7, 0xb8, 0xfa, 0x1b, 0x6d, + 0x2a, 0x26, 0x3f, 0xcc, 0xc7, 0x24, 0xfd, 0xa7, 0x35, 0x7b, 0x43, 0x87, 0x5f, 0xd6, 0x7a, 0x3c, + 0x44, 0x87, 0x9f, 0x80, 0x9d, 0x90, 0x92, 0xe6, 0x03, 0xcf, 0xed, 0x62, 0xdf, 0x17, 0x07, 0xcc, + 0xa5, 0xbc, 0xcb, 0x05, 0xed, 0xa0, 0xcb, 0xea, 0x34, 0x8d, 0x7e, 0x17, 0xc0, 0xa9, 0x56, 0x38, + 0xc3, 0x1b, 0x96, 0xc1, 0xa2, 0x2c, 0x16, 0x8e, 0xa0, 0xea, 0x93, 0x45, 0x39, 0xa8, 0xc9, 0xf5, + 0xe6, 0x78, 0x20, 0x9f, 0x78, 0x4d, 0x6c, 0x65, 0xbd, 0x26, 0xe0, 0x3b, 0x60, 0x83, 0x53, 0xc7, + 0xf1, 0x3b, 0x41, 0x3d, 0xf4, 0x5b, 0x12, 0x1c, 0x6d, 0x2b, 0xe3, 0xad, 0x7e, 0xaf, 0xbc, 0xd1, + 0x88, 0xab, 0x70, 0xd2, 0x16, 0x1e, 0x81, 0x6d, 0xdd, 0x0c, 0x8f, 0x3d, 0x4e, 0x5a, 0xb4, 0xd1, + 0xe5, 0x8e, 0x70, 0x39, 0x42, 0x0a, 0x03, 0xf5, 0x7b, 0xe5, 0xed, 0x6a, 0x8a, 0x1e, 0xa7, 0x7a, + 0xc1, 0x77, 0xc1, 0x66, 0xcb, 0x0f, 0x4f, 0x58, 0xb3, 0x49, 0xbd, 0x21, 0xd2, 0x2b, 0x0a, 0x69, + 0x5b, 0x36, 0xd0, 0x83, 0x84, 0x0e, 0x4f, 0x59, 0x9b, 0xff, 0x34, 0x40, 0x29, 0x7b, 0x00, 0xba, + 0x80, 0xc1, 0x9b, 0xc6, 0x07, 0xef, 0x77, 0xf3, 0xfe, 0x8c, 0x94, 0x45, 0x39, 0x63, 0x06, 0xff, + 0xd5, 0x3c, 0xf8, 0xce, 0x57, 0xf8, 0xed, 0x09, 0xfe, 0xd9, 0x00, 0x37, 0x82, 0x1c, 0x8f, 0x46, + 0xbd, 0x22, 0x2f, 0xf2, 0x1d, 0xfe, 0x5d, 0x9d, 0x40, 0xae, 0x47, 0x2b, 0xce, 0xc5, 0x52, 0xbe, + 0xa4, 0x3d, 0xd2, 0xa1, 0xc9, 0x97, 0xb4, 0xbc, 0x37, 0xb0, 0xd2, 0x98, 0x7f, 0x30, 0xc0, 0x37, + 0x9f, 0xdb, 0x33, 0xa0, 0x1d, 0x9b, 0xe7, 0xad, 0xc4, 0x3c, 0x5f, 0xca, 0x06, 0x78, 0xe9, 0x3f, + 0x8d, 0xdb, 0x1f, 0x3c, 0x79, 0x5a, 0x9a, 0xfb, 0xe2, 0x69, 0x69, 0xee, 0xcb, 0xa7, 0xa5, 0xb9, + 0x9f, 0xf5, 0x4b, 0xc6, 0x93, 0x7e, 0xc9, 0xf8, 0xa2, 0x5f, 0x32, 0xbe, 0xec, 0x97, 0x8c, 0xbf, + 0xf7, 0x4b, 0xc6, 0x2f, 0xfe, 0x51, 0x9a, 0xfb, 0xf4, 0x46, 0x9e, 0xff, 0xa2, 0xfc, 0x37, 0x00, + 0x00, 0xff, 0xff, 0xb7, 0xb2, 0xaf, 0x36, 0x6c, 0x19, 0x00, 0x00, +} + +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IDRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ServiceAccountNames) > 0 { + for iNdEx := len(m.ServiceAccountNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServiceAccountNames[iNdEx]) + copy(dAtA[i:], m.ServiceAccountNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedServiceAccounts) > 0 { + for iNdEx := len(m.AllowedServiceAccounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedServiceAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySubjectReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySubjectReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySubjectReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + if m.AllowedBy != nil { + { + size, err := m.AllowedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Range) + copy(dAtA[i:], m.Range) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RangeAllocationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeAllocationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeAllocationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UIDRangeMax != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UIDRangeMax)) + i-- + dAtA[i] = 0x20 + } + if m.UIDRangeMin != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UIDRangeMin)) + i-- + dAtA[i] = 0x18 + } + if m.UID != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UID)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SELinuxContextStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxContextStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxContextStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContextConstraints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContextConstraints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContextConstraints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UserNamespaceLevel) + copy(dAtA[i:], m.UserNamespaceLevel) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserNamespaceLevel))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + if len(m.ForbiddenSysctls) > 0 { + for 
iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if len(m.SeccompProfiles) > 0 { + for iNdEx := len(m.SeccompProfiles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SeccompProfiles[iNdEx]) + copy(dAtA[i:], m.SeccompProfiles[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SeccompProfiles[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + { + size, err := m.SELinuxContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + i-- + if m.AllowHostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i-- + if m.AllowHostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i-- + if m.AllowHostPorts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i-- + if m.AllowHostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + 
copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + i-- + if m.AllowHostDirVolumePlugin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.AllowPrivilegedContainer { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContextConstraintsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContextConstraintsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContextConstraintsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + { + size, err := m.PodSecurityPolicySubjectReviewStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- 
+ dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *PodSecurityPolicyReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServiceAccountNames) > 0 { + for _, s := range m.ServiceAccountNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicyReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedServiceAccounts) > 0 { + for _, e := range m.AllowedServiceAccounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicySubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicySubjectReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AllowedBy != nil { + l = m.AllowedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RangeAllocationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.UID != nil { + n += 1 + sovGenerated(uint64(*m.UID)) + } + if m.UIDRangeMin != nil { + n += 1 + sovGenerated(uint64(*m.UIDRangeMin)) + } + if m.UIDRangeMax != nil { + n += 1 + sovGenerated(uint64(*m.UIDRangeMax)) + } + return n +} + +func (m *SELinuxContextStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SecurityContextConstraints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) + } + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + n += 2 + n += 2 + l = m.SELinuxContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + n += 3 + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.SeccompProfiles) > 0 { + for _, s := range m.SeccompProfiles 
{ + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultAllowPrivilegeEscalation != nil { + n += 3 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.UserNamespaceLevel) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecurityContextConstraintsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSecurityPolicySubjectReviewStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicyReviewSpec", "PodSecurityPolicyReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicyReviewStatus", "PodSecurityPolicyReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyReviewSpec{`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `ServiceAccountNames:` + fmt.Sprintf("%v", this.ServiceAccountNames) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedServiceAccounts := "[]ServiceAccountPodSecurityPolicyReviewStatus{" + for _, f := range this.AllowedServiceAccounts { + repeatedStringForAllowedServiceAccounts += strings.Replace(strings.Replace(f.String(), "ServiceAccountPodSecurityPolicyReviewStatus", "ServiceAccountPodSecurityPolicyReviewStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedServiceAccounts += "}" + s := strings.Join([]string{`&PodSecurityPolicyReviewStatus{`, + `AllowedServiceAccounts:` + repeatedStringForAllowedServiceAccounts + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySelfSubjectReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySelfSubjectReviewSpec", "PodSecurityPolicySelfSubjectReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySelfSubjectReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySelfSubjectReviewSpec{`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySubjectReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySubjectReviewSpec", "PodSecurityPolicySubjectReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySubjectReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySubjectReviewSpec{`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySubjectReviewStatus{`, + `AllowedBy:` + strings.Replace(fmt.Sprintf("%v", this.AllowedBy), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 
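+ // The inner Replace qualifies the embedded type's name with its import alias (v11 is the core/v1 alias in this file); the outer Replace strips the leading & so the nested value reads inline in the debug string.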
1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RangeAllocation{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RangeAllocation", "RangeAllocation", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RangeAllocationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `UIDRangeMin:` + valueToStringGenerated(this.UIDRangeMin) + `,`, + `UIDRangeMax:` + valueToStringGenerated(this.UIDRangeMax) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxContextStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxContextStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContextConstraints) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + s := strings.Join([]string{`&SecurityContextConstraints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `AllowPrivilegedContainer:` + fmt.Sprintf("%v", this.AllowPrivilegedContainer) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `AllowHostDirVolumePlugin:` + fmt.Sprintf("%v", this.AllowHostDirVolumePlugin) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `AllowHostNetwork:` + fmt.Sprintf("%v", this.AllowHostNetwork) + `,`, + `AllowHostPorts:` + fmt.Sprintf("%v", this.AllowHostPorts) + `,`, + `AllowHostPID:` + fmt.Sprintf("%v", this.AllowHostPID) + `,`, + `AllowHostIPC:` + fmt.Sprintf("%v", this.AllowHostIPC) + `,`, + `SELinuxContext:` + strings.Replace(strings.Replace(this.SELinuxContext.String(), "SELinuxContextStrategyOptions", "SELinuxContextStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", 
"RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `SeccompProfiles:` + fmt.Sprintf("%v", this.SeccompProfiles) + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `UserNamespaceLevel:` + fmt.Sprintf("%v", this.UserNamespaceLevel) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContextConstraintsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]SecurityContextConstraints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "SecurityContextConstraints", "SecurityContextConstraints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecurityContextConstraintsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountPodSecurityPolicyReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountPodSecurityPolicyReviewStatus{`, + `PodSecurityPolicySubjectReviewStatus:` + strings.Replace(strings.Replace(this.PodSecurityPolicySubjectReviewStatus.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountNames = append(m.ServiceAccountNames, 
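+ // A repeated string field arrives as one length-delimited record per element, so each occurrence of field 2 appends a single service account name.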
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedServiceAccounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedServiceAccounts = append(m.AllowedServiceAccounts, ServiceAccountPodSecurityPolicyReviewStatus{}) + if err := m.AllowedServiceAccounts[len(m.AllowedServiceAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
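+ // skipGenerated reported a width that would run past the end of the buffer: the unknown field, and therefore the message, is truncated.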
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySubjectReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewStatus: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllowedBy == nil { + m.AllowedBy = &v11.ObjectReference{} + } + if err := m.AllowedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RangeAllocation{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RunAsUserStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UID = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UIDRangeMin", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UIDRangeMin = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UIDRangeMax", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UIDRangeMax = &v + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxContextStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxContextStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxContextStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SELinuxContextStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &v11.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContextConstraints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContextConstraints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContextConstraints: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainer", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrivilegedContainer = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostDirVolumePlugin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostDirVolumePlugin = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostNetwork = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostPorts", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostPorts = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostPID = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostIPC = bool(v != 0) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinuxContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfiles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeccompProfiles = append(m.SeccompProfiles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaceLevel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserNamespaceLevel = NamespaceLevelType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContextConstraintsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContextConstraintsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContextConstraintsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, SecurityContextConstraints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountPodSecurityPolicyReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountPodSecurityPolicyReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSecurityPolicySubjectReviewStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSecurityPolicySubjectReviewStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, 
ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto new file mode 100644 index 0000000000000..0e6bb094fbc47 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -0,0 +1,421 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.security.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/security/v1"; + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // type is the strategy that will dictate what FSGroup is used in the SecurityContext. + optional string type = 1; + + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +listType=atomic + repeated IDRange ranges = 2; +} + +// IDRange provides a min/max of an allowed range of IDs. +// TODO: this could be reused for UIDs. +message IDRange { + // min is the start of the range, inclusive. + optional int64 min = 1; + + // max is the end of the range, inclusive. + optional int64 max = 2; +} + +// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +message PodSecurityPolicyReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + + // spec is the PodSecurityPolicy to check. + optional PodSecurityPolicyReviewSpec spec = 1; + + // status represents the current information/status for the PodSecurityPolicyReview. + optional PodSecurityPolicyReviewStatus status = 2; +} + +// PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview +message PodSecurityPolicyReviewSpec { + // template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used + // if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, + // in which case "default" is used. + // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored. 
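Editorial note on the generated decoder that ends above: every hand-rolled Unmarshal method in generated.pb.go first reads a tag varint whose value is fieldNum<<3|wireType, then decodes the payload according to the wire type. A minimal, self-contained sketch of that base-128 varint loop (standalone names, not taken from the generated file):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift loop the generator inlines per field:
// seven payload bits per byte, most-significant bit set on every byte
// except the last, least-significant groups first.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// Tag for field 2, wire type 0 (varint) is 2<<3|0 = 0x10; the value 300
	// encodes as 0xAC 0x02 (44 + 2<<7).
	data := []byte{0x10, 0xAC, 0x02}
	tag, n, _ := decodeVarint(data)
	fmt.Printf("field=%d wireType=%d\n", tag>>3, tag&0x7) // field=2 wireType=0
	val, _, _ := decodeVarint(data[n:])
	fmt.Println("value =", val) // 300
}
```

The shift/0x7F/0x80 pattern repeated throughout the generated code above is exactly this loop, inlined once per field.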
+ optional .k8s.io.api.core.v1.PodTemplateSpec template = 1; + + // serviceAccountNames is an optional set of ServiceAccounts to run the check with. + // If serviceAccountNames is empty, the template.spec.serviceAccountName is used, + // unless it's empty, in which case "default" is used instead. + // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored. + repeated string serviceAccountNames = 2; +} + +// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. +message PodSecurityPolicyReviewStatus { + // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. + repeated ServiceAccountPodSecurityPolicyReviewStatus allowedServiceAccounts = 1; +} + +// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +message PodSecurityPolicySelfSubjectReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + + // spec defines the specification for the PodSecurityPolicySelfSubjectReview. + optional PodSecurityPolicySelfSubjectReviewSpec spec = 1; + + // status represents the current information/status for the PodSecurityPolicySelfSubjectReview. + optional PodSecurityPolicySubjectReviewStatus status = 2; +} + +// PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview. +message PodSecurityPolicySelfSubjectReviewSpec { + // template is the PodTemplateSpec to check. + optional .k8s.io.api.core.v1.PodTemplateSpec template = 1; +} + +// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +message PodSecurityPolicySubjectReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + + // spec defines specification for the PodSecurityPolicySubjectReview. + optional PodSecurityPolicySubjectReviewSpec spec = 1; + + // status represents the current information/status for the PodSecurityPolicySubjectReview. + optional PodSecurityPolicySubjectReviewStatus status = 2; +} + +// PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview +message PodSecurityPolicySubjectReviewSpec { + // template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. + // If it's non-empty, it will be checked. + optional .k8s.io.api.core.v1.PodTemplateSpec template = 1; + + // user is the user you're testing for. + // If you specify "user" but not "group", then it is interpreted as "what if the user were not a member of any groups?". + // If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template. + optional string user = 2; + + // groups is the list of groups you're testing for.
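The defaulting rules spelled out in PodSecurityPolicyReviewSpec and PodSecurityPolicySubjectReviewSpec above reduce to a short decision ladder. A sketch with a hypothetical helper name (not part of this API):

```go
package main

import "fmt"

// resolveServiceAccounts sketches the documented defaulting: explicit
// serviceAccountNames win (the template's serviceAccountName is then
// ignored); otherwise fall back to the template's value, and finally to
// "default".
func resolveServiceAccounts(serviceAccountNames []string, templateSA string) []string {
	if len(serviceAccountNames) > 0 {
		return serviceAccountNames
	}
	if templateSA != "" {
		return []string{templateSA}
	}
	return []string{"default"}
}

func main() {
	fmt.Println(resolveServiceAccounts(nil, ""))            // [default]
	fmt.Println(resolveServiceAccounts(nil, "builder"))     // [builder]
	fmt.Println(resolveServiceAccounts([]string{"a"}, "b")) // [a]
}
```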
+ repeated string groups = 3; +} + +// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview. +message PodSecurityPolicySubjectReviewStatus { + // allowedBy is a reference to the rule that allows the PodTemplateSpec. + // A rule can be a SecurityContextConstraint or a PodSecurityPolicy. + // A `nil` value indicates that the request was denied. + optional .k8s.io.api.core.v1.ObjectReference allowedBy = 1; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. + optional string reason = 2; + + // template is the PodTemplateSpec after the defaulting is applied. + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// RangeAllocation is used so we can easily expose a RangeAllocation typed for the security group +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +message RangeAllocation { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000". + optional string range = 2; + + // data is a byte array representing the serialized state of a range allocation. It is a bitmap + // with each bit set to one to represent that a range is taken. + optional bytes data = 3; +} + +// RangeAllocationList is a list of RangeAllocation objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RangeAllocationList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of RangeAllocations. + repeated RangeAllocation items = 2; +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsUserStrategyOptions { + // type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + optional string type = 1; + + // uid is the user id that containers must run as. Required for the MustRunAs strategy if not using + // namespace/service account allocated uids. + optional int64 uid = 2; + + // uidRangeMin defines the min value for a strategy that allocates by range. + optional int64 uidRangeMin = 3; + + // uidRangeMax defines the max value for a strategy that allocates by range. + optional int64 uidRangeMax = 4; +} + +// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. +message SELinuxContextStrategyOptions { + // type is the strategy that will dictate what SELinux context is used in the SecurityContext. + optional string type = 1; + + // seLinuxOptions required to run as; required for MustRunAs + optional .k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; +} + +// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext +// that will be applied to a container. +// For historical reasons SCC was exposed under the core Kubernetes API group.
+// That exposure is deprecated and will be removed in a future release - users +// should instead use the security.openshift.io group to manage +// SecurityContextConstraints. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=securitycontextconstraints,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=.allowPrivilegedContainer,description="Determines if a container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=.allowedCapabilities,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=.seLinuxContext.type,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=.runAsUser.type,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=.fsGroup.type,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=.supplementalGroups.type,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=.priority,description="Sort order of SCCs" +// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=.readOnlyRootFilesystem,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" +// +kubebuilder:singular=securitycontextconstraint +// +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +message SecurityContextConstraints { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // priority influences the sort order of SCCs when evaluating which SCCs to try first for + // a given pod request based on access in the Users and Groups fields. The higher the int, the + // higher the priority. An unset value is considered a 0 priority. If scores + // for multiple SCCs are equal they will be sorted from most restrictive to + // least restrictive. If both priorities and restrictions are equal the + // SCCs will be sorted by name. + // +nullable + optional int32 priority = 2; + + // allowPrivilegedContainer determines if a container can request to be run as privileged. + optional bool allowPrivilegedContainer = 3; + + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +nullable + // +listType=atomic + repeated string defaultAddCapabilities = 4; + + // requiredDropCapabilities are the capabilities that will be dropped from the container.
These + // are required to be dropped and cannot be added. + // +nullable + // +listType=atomic + repeated string requiredDropCapabilities = 5; + + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // To allow all capabilities you may use '*'. + // +nullable + // +listType=atomic + repeated string allowedCapabilities = 6; + + // allowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin + // +k8s:conversion-gen=false + optional bool allowHostDirVolumePlugin = 7; + + // volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". + // To allow no volumes, set to ["none"]. + // +nullable + // +listType=atomic + repeated string volumes = 8; + + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + // +nullable + // +listType=atomic + repeated AllowedFlexVolume allowedFlexVolumes = 21; + + // allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + optional bool allowHostNetwork = 9; + + // allowHostPorts determines if the policy allows host ports in the containers. + optional bool allowHostPorts = 10; + + // allowHostPID determines if the policy allows host pid in the containers. + optional bool allowHostPID = 11; + + // allowHostIPC determines if the policy allows host ipc in the containers. + optional bool allowHostIPC = 12; + + // userNamespaceLevel determines if the policy allows host users in containers. + // Valid values are "AllowHostLevel", "RequirePodLevel", and omitted. + // When "AllowHostLevel" is set, a pod author may set `hostUsers` to either `true` or `false`. + // When "RequirePodLevel" is set, a pod author must set `hostUsers` to `false`. + // When omitted, the default value is "AllowHostLevel". + // +openshift:enable:FeatureGate=UserNamespacesPodSecurityStandards + // +kubebuilder:validation:Enum="AllowHostLevel";"RequirePodLevel" + // +kubebuilder:default:="AllowHostLevel" + // +default="AllowHostLevel" + // +optional + optional string userNamespaceLevel = 26; + + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + // +nullable + optional bool defaultAllowPrivilegeEscalation = 22; + + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + // +nullable + optional bool allowPrivilegeEscalation = 23; + + // seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // +nullable + optional SELinuxContextStrategyOptions seLinuxContext = 13; + + // runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // +nullable + optional RunAsUserStrategyOptions runAsUser = 14; + + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
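The userNamespaceLevel contract documented above is easy to mis-read, so here is a sketch of the rule (string literals stand in for the NamespaceLevelType constants declared later in types.go; this is not the real admission code):

```go
// allowsHostUsers: "RequirePodLevel" rejects any pod that does not
// explicitly set hostUsers: false; "AllowHostLevel" (or an omitted level)
// leaves the choice to the pod author.
func allowsHostUsers(level string, hostUsers *bool) bool {
	if level == "RequirePodLevel" {
		// the pod author must explicitly set hostUsers: false
		return hostUsers != nil && !*hostUsers
	}
	// "AllowHostLevel", or omitted: the pod author may pick either value
	return true
}
```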
+ // +nullable + optional SupplementalGroupsStrategyOptions supplementalGroups = 15; + + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // +nullable + optional FSGroupStrategyOptions fsGroup = 16; + + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the SCC should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + optional bool readOnlyRootFilesystem = 17; + + // The users who have permissions to use this security context constraints + // +optional + // +nullable + // +listType=atomic + repeated string users = 18; + + // The groups that have permission to use this security context constraints + // +optional + // +nullable + // +listType=atomic + repeated string groups = 19; + + // seccompProfiles lists the allowed profiles that may be set for the pod or + // container's seccomp annotations. An unset (nil) or empty value means that no profiles may + // be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When + // used to generate a value for a pod the first non-wildcard profile will be used as + // the default. + // +nullable + // +listType=atomic + repeated string seccompProfiles = 20; + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + // +nullable + // +listType=atomic + repeated string allowedUnsafeSysctls = 24; + + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + // +nullable + // +listType=atomic + repeated string forbiddenSysctls = 25; +} + +// SecurityContextConstraintsList is a list of SecurityContextConstraints objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SecurityContextConstraintsList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of security context constraints.
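The sysctl pattern semantics described above (plain names, "*"-suffixed prefixes, and the lone "*" wildcard) fit in a few lines. A hedged, self-contained sketch, not the kubelet's actual matcher:

```go
package main

import (
	"fmt"
	"strings"
)

// sysctlMatches: an entry is either a plain sysctl name or a prefix
// ending in "*"; a lone "*" matches every sysctl.
func sysctlMatches(pattern, name string) bool {
	if pattern == "*" {
		return true
	}
	if strings.HasSuffix(pattern, "*") {
		return strings.HasPrefix(name, strings.TrimSuffix(pattern, "*"))
	}
	return pattern == name
}

func main() {
	fmt.Println(sysctlMatches("kernel.shm*", "kernel.shmmax"))           // true
	fmt.Println(sysctlMatches("net.core.somaxconn", "net.ipv4.ip_forward")) // false
}
```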
+ repeated SecurityContextConstraints items = 2; +} + +// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status +message ServiceAccountPodSecurityPolicyReviewStatus { + optional PodSecurityPolicySubjectReviewStatus podSecurityPolicySubjectReviewStatus = 1; + + // name contains the allowed and the denied ServiceAccount name + optional string name = 2; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // type is the strategy that will dictate what supplemental groups are used in the SecurityContext. + optional string type = 1; + + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +listType=atomic + repeated IDRange ranges = 2; +} + diff --git a/vendor/github.com/openshift/api/security/v1/legacy.go b/vendor/github.com/openshift/api/security/v1/legacy.go new file mode 100644 index 0000000000000..34f609a07b05c --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/legacy.go @@ -0,0 +1,25 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &SecurityContextConstraints{}, + &SecurityContextConstraintsList{}, + &PodSecurityPolicySubjectReview{}, + &PodSecurityPolicySelfSubjectReview{}, + &PodSecurityPolicyReview{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/security/v1/register.go b/vendor/github.com/openshift/api/security/v1/register.go new file mode 100644 index 0000000000000..431c3b5397845 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "security.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme.
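legacy.go and register.go above follow the standard scheme-builder pattern. A consumer installs the group like this (a sketch using only the Install symbol defined above):

```go
package main

import (
	securityv1 "github.com/openshift/api/security/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers every security.openshift.io/v1 type listed in
	// addKnownTypes (the builder also chains in corev1.AddToScheme).
	if err := securityv1.Install(scheme); err != nil {
		panic(err)
	}
	// DeprecatedInstallWithoutGroup would additionally register the same
	// kinds under the legacy ungrouped "v1" GroupVersion from legacy.go.
}
```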
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &SecurityContextConstraints{}, + &SecurityContextConstraintsList{}, + &PodSecurityPolicySubjectReview{}, + &PodSecurityPolicySelfSubjectReview{}, + &PodSecurityPolicyReview{}, + &RangeAllocation{}, + &RangeAllocationList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go new file mode 100644 index 0000000000000..18585e97c042f --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -0,0 +1,517 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AllowAllCapabilities can be used as a value in the +// SecurityContextConstraints.AllowedCapabilities list and means that any +// capabilities are allowed to be requested. +var AllowAllCapabilities corev1.Capability = "*" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext +// that will be applied to a container. +// For historical reasons SCC was exposed under the core Kubernetes API group. +// That exposure is deprecated and will be removed in a future release - users +// should instead use the security.openshift.io group to manage +// SecurityContextConstraints. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=securitycontextconstraints,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=.allowPrivilegedContainer,description="Determines if a container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=.allowedCapabilities,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=.seLinuxContext.type,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=.runAsUser.type,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=.fsGroup.type,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=.supplementalGroups.type,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=.priority,description="Sort order of SCCs" +// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=.readOnlyRootFilesystem,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" +// +kubebuilder:singular=securitycontextconstraint +// +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true +type
SecurityContextConstraints struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // priority influences the sort order of SCCs when evaluating which SCCs to try first for + // a given pod request based on access in the Users and Groups fields. The higher the int, the + // higher the priority. An unset value is considered a 0 priority. If scores + // for multiple SCCs are equal they will be sorted from most restrictive to + // least restrictive. If both priorities and restrictions are equal the + // SCCs will be sorted by name. + // +nullable + Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"` + + // allowPrivilegedContainer determines if a container can request to be run as privileged. + AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"` + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +nullable + // +listType=atomic + DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"` + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +nullable + // +listType=atomic + RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"` + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // To allow all capabilities you may use '*'. + // +nullable + // +listType=atomic + AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"` + // allowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin + // +k8s:conversion-gen=false + AllowHostDirVolumePlugin bool `json:"allowHostDirVolumePlugin" protobuf:"varint,7,opt,name=allowHostDirVolumePlugin"` + // volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". + // To allow no volumes, set to ["none"]. + // +nullable + // +listType=atomic + Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + // +nullable + // +listType=atomic + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` + // allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
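The interplay of the three capability lists above can be summarized in a short sketch. This is written as if inside this package (corev1 is already imported by types.go) and is not the real SCC admission logic, which lives outside this API package:

```go
// capabilityAllowed: requiredDropCapabilities always wins; otherwise
// allowedCapabilities may grant a request, including via the "*"
// wildcard (AllowAllCapabilities).
func capabilityAllowed(c corev1.Capability, scc *SecurityContextConstraints) bool {
	for _, drop := range scc.RequiredDropCapabilities {
		if drop == c {
			return false
		}
	}
	for _, allowed := range scc.AllowedCapabilities {
		if allowed == AllowAllCapabilities || allowed == c {
			return true
		}
	}
	return false
}
```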
+ AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` + // allowHostPorts determines if the policy allows host ports in the containers. + AllowHostPorts bool `json:"allowHostPorts" protobuf:"varint,10,opt,name=allowHostPorts"` + // allowHostPID determines if the policy allows host pid in the containers. + AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"` + // allowHostIPC determines if the policy allows host ipc in the containers. + AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"` + // userNamespaceLevel determines if the policy allows host users in containers. + // Valid values are "AllowHostLevel", "RequirePodLevel", and omitted. + // When "AllowHostLevel" is set, a pod author may set `hostUsers` to either `true` or `false`. + // When "RequirePodLevel" is set, a pod author must set `hostUsers` to `false`. + // When omitted, the default value is "AllowHostLevel". + // +openshift:enable:FeatureGate=UserNamespacesPodSecurityStandards + // +kubebuilder:validation:Enum="AllowHostLevel";"RequirePodLevel" + // +kubebuilder:default:="AllowHostLevel" + // +default="AllowHostLevel" + // +optional + UserNamespaceLevel NamespaceLevelType `json:"userNamespaceLevel,omitempty" protobuf:"bytes,26,opt,name=userNamespaceLevel"` + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + // +nullable + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"` + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + // +nullable + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"` + // seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // +nullable + SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"` + // runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // +nullable + RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"` + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // +nullable + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"` + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // +nullable + FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"` + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the SCC should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. 
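Two of the fields above carry documented nil semantics that are easy to miss; a sketch assuming this package's types:

```go
// effectivePriority: an unset priority counts as 0 for sorting.
func effectivePriority(scc *SecurityContextConstraints) int32 {
	if scc.Priority == nil {
		return 0
	}
	return *scc.Priority
}

// effectiveAllowPrivilegeEscalation: unspecified defaults to true.
func effectiveAllowPrivilegeEscalation(scc *SecurityContextConstraints) bool {
	return scc.AllowPrivilegeEscalation == nil || *scc.AllowPrivilegeEscalation
}
```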
+ ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem" protobuf:"varint,17,opt,name=readOnlyRootFilesystem"` + + // The users who have permissions to use this security context constraints + // +optional + // +nullable + // +listType=atomic + Users []string `json:"users" protobuf:"bytes,18,rep,name=users"` + // The groups that have permission to use this security context constraints + // +optional + // +nullable + // +listType=atomic + Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` + + // seccompProfiles lists the allowed profiles that may be set for the pod or + // container's seccomp annotations. An unset (nil) or empty value means that no profiles may + // be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When + // used to generate a value for a pod the first non-wildcard profile will be used as + // the default. + // +nullable + // +listType=atomic + SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"` + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + // +nullable + // +listType=atomic + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + // +nullable + // +listType=atomic + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"` +} + +// FSType gives strong typing to different file systems that are used by volumes.
+type FSType string + +var ( + FSTypeAzureFile FSType = "azureFile" + FSTypeAzureDisk FSType = "azureDisk" + FSTypeFlocker FSType = "flocker" + FSTypeFlexVolume FSType = "flexVolume" + FSTypeHostPath FSType = "hostPath" + FSTypeEmptyDir FSType = "emptyDir" + FSTypeGCEPersistentDisk FSType = "gcePersistentDisk" + FSTypeAWSElasticBlockStore FSType = "awsElasticBlockStore" + FSTypeGitRepo FSType = "gitRepo" + FSTypeSecret FSType = "secret" + FSTypeNFS FSType = "nfs" + FSTypeISCSI FSType = "iscsi" + FSTypeGlusterfs FSType = "glusterfs" + FSTypePersistentVolumeClaim FSType = "persistentVolumeClaim" + FSTypeRBD FSType = "rbd" + FSTypeCinder FSType = "cinder" + FSTypeCephFS FSType = "cephFS" + FSTypeDownwardAPI FSType = "downwardAPI" + FSTypeFC FSType = "fc" + FSTypeConfigMap FSType = "configMap" + FSTypeVsphereVolume FSType = "vsphere" + FSTypeQuobyte FSType = "quobyte" + FSTypePhotonPersistentDisk FSType = "photonPersistentDisk" + FSProjected FSType = "projected" + FSPortworxVolume FSType = "portworxVolume" + FSScaleIO FSType = "scaleIO" + FSStorageOS FSType = "storageOS" + FSTypeCSI FSType = "csi" + FSTypeEphemeral FSType = "ephemeral" + FSTypeImage FSType = "image" + FSTypeAll FSType = "*" + FSTypeNone FSType = "none" +) + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxContextStrategyOptions struct { + // type is the strategy that will dictate what SELinux context is used in the SecurityContext. + Type SELinuxContextStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SELinuxContextStrategyType"` + // seLinuxOptions required to run as; required for MustRunAs + SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + Type RunAsUserStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=RunAsUserStrategyType"` + // uid is the user id that containers must run as. Required for the MustRunAs strategy if not using + // namespace/service account allocated uids. + UID *int64 `json:"uid,omitempty" protobuf:"varint,2,opt,name=uid"` + // uidRangeMin defines the min value for a strategy that allocates by range. + UIDRangeMin *int64 `json:"uidRangeMin,omitempty" protobuf:"varint,3,opt,name=uidRangeMin"` + // uidRangeMax defines the max value for a strategy that allocates by range. + UIDRangeMax *int64 `json:"uidRangeMax,omitempty" protobuf:"varint,4,opt,name=uidRangeMax"` +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // type is the strategy that will dictate what FSGroup is used in the SecurityContext. + Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"` + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. 
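Given the FSType constants above, the volumes whitelist check is a single scan; a sketch assuming this package's types:

```go
// volumeAllowed: "*" (FSTypeAll) admits every plugin, so a list of just
// "none" (FSTypeNone) admits nothing, because no concrete FSType request
// ever equals it.
func volumeAllowed(requested FSType, volumes []FSType) bool {
	for _, v := range volumes {
		if v == FSTypeAll || v == requested {
			return true
		}
	}
	return false
}
```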
+ // +listType=atomic + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // type is the strategy that will dictate what supplemental groups are used in the SecurityContext. + Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"` + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +listType=atomic + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// IDRange provides a min/max of an allowed range of IDs. +// TODO: this could be reused for UIDs. +type IDRange struct { + // min is the start of the range, inclusive. + Min int64 `json:"min,omitempty" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"` +} + +// NamespaceLevelType shows the allowable values for the UserNamespaceLevel field. +type NamespaceLevelType string + +// SELinuxContextStrategyType denotes strategy types for generating SELinux options for a +// SecurityContext +type SELinuxContextStrategyType string + +// RunAsUserStrategyType denotes strategy types for generating RunAsUser values for a +// SecurityContext +type RunAsUserStrategyType string + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // NamespaceLevelAllowHost allows a pod to set the `hostUsers` field to either `true` or `false` + NamespaceLevelAllowHost NamespaceLevelType = "AllowHostLevel" + // NamespaceLevelRequirePod requires the `hostUsers` field be `false` in a pod. + NamespaceLevelRequirePod NamespaceLevelType = "RequirePodLevel" + + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxContextStrategyType = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxContextStrategyType = "RunAsAny" + + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategyType = "MustRunAs" + // container must run as a uid within a particular range. + RunAsUserStrategyMustRunAsRange RunAsUserStrategyType = "MustRunAsRange" + // container must run as a non-root uid. + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategyType = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategyType = "RunAsAny" + + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" + + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid.
+ SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecurityContextConstraintsList is a list of SecurityContextConstraints objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SecurityContextConstraintsList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of security context constraints. + Items []SecurityContextConstraints `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type PodSecurityPolicySubjectReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + + // spec defines the specification for the PodSecurityPolicySubjectReview. + Spec PodSecurityPolicySubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status represents the current information/status for the PodSecurityPolicySubjectReview. + Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview +type PodSecurityPolicySubjectReviewSpec struct { + // template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. + // If it's non-empty, it will be checked. + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"` + + // user is the user you're testing for. + // If you specify "user" but not "groups", then it is interpreted as "What if user were not a member of any groups?". + // If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template. + User string `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + + // groups is the groups you're testing for. + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` +} + +// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview. +type PodSecurityPolicySubjectReviewStatus struct { + // allowedBy is a reference to the rule that allows the PodTemplateSpec. + // A rule can be a SecurityContextConstraint or a PodSecurityPolicy. + // A `nil` value indicates that it was denied. + AllowedBy *corev1.ObjectReference `json:"allowedBy,omitempty" protobuf:"bytes,1,opt,name=allowedBy"` + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + + // template is the PodTemplateSpec after the defaulting is applied. + Template corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type PodSecurityPolicySelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + + // spec defines the specification for the PodSecurityPolicySelfSubjectReview. + Spec PodSecurityPolicySelfSubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status represents the current information/status for the PodSecurityPolicySelfSubjectReview. + Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview. +type PodSecurityPolicySelfSubjectReviewSpec struct { + // template is the PodTemplateSpec to check. + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type PodSecurityPolicyReview struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + + // spec is the PodSecurityPolicy to check. + Spec PodSecurityPolicyReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status represents the current information/status for the PodSecurityPolicyReview. + Status PodSecurityPolicyReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview +type PodSecurityPolicyReviewSpec struct { + // template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used + // if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, + // in which case "default" is used. + // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored. + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"` + + // serviceAccountNames is an optional set of ServiceAccounts to run the check with. + // If serviceAccountNames is empty, the template.spec.serviceAccountName is used, + // unless it's empty, in which case "default" is used instead. + // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
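+ //
+ // An illustrative sketch of the documented fallback (editorial, not the
+ // server implementation):
+ //
+ //	func accountsToCheck(spec PodSecurityPolicyReviewSpec) []string {
+ //		if len(spec.ServiceAccountNames) > 0 {
+ //			return spec.ServiceAccountNames // serviceAccountName is ignored
+ //		}
+ //		if sa := spec.Template.Spec.ServiceAccountName; sa != "" {
+ //			return []string{sa}
+ //		}
+ //		return []string{"default"}
+ //	}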
+ ServiceAccountNames []string `json:"serviceAccountNames,omitempty" protobuf:"bytes,2,rep,name=serviceAccountNames"` // TODO: find a way to express 'all service accounts' +} + +// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. +type PodSecurityPolicyReviewStatus struct { + // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. + AllowedServiceAccounts []ServiceAccountPodSecurityPolicyReviewStatus `json:"allowedServiceAccounts" protobuf:"bytes,1,rep,name=allowedServiceAccounts"` +} + +// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status +type ServiceAccountPodSecurityPolicyReviewStatus struct { + PodSecurityPolicySubjectReviewStatus `json:",inline" protobuf:"bytes,1,opt,name=podSecurityPolicySubjectReviewStatus"` + + // name contains the allowed and the denied ServiceAccount name + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RangeAllocation is used so we can easily expose a RangeAllocation typed for the security API group. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type RangeAllocation struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000". + Range string `json:"range" protobuf:"bytes,2,opt,name=range"` + + // data is a byte array representing the serialized state of a range allocation. It is a bitmap + // with each bit set to one to represent a range is taken. (A decoding sketch follows the list type below.) + Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RangeAllocationList is a list of RangeAllocation objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RangeAllocationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of RangeAllocations. + Items []RangeAllocation `json:"items" protobuf:"bytes,2,rep,name=items"` +}
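+
+// Editorial sketch, not part of the upstream API: one way to decode the
+// Range string and test whether block i is taken in Data, assuming the
+// "start-end/blockSize" format shown above and a low-bit-first bitmap
+// layout (the real allocator lives in the origin codebase and may differ):
+//
+//	var start, end, block int64
+//	if _, err := fmt.Sscanf(alloc.Range, "%d-%d/%d", &start, &end, &block); err != nil {
+//		return err
+//	}
+//	i := 42 // index of the block to test
+//	taken := alloc.Data[i/8]&(1<<(uint(i)%8)) != 0
 diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..66e8b5a21cdeb --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go @@ -0,0 +1,536 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT.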
+ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. +func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyReview) DeepCopyInto(out *PodSecurityPolicyReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReview. +func (in *PodSecurityPolicyReview) DeepCopy() *PodSecurityPolicyReview { + if in == nil { + return nil + } + out := new(PodSecurityPolicyReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyReviewSpec) DeepCopyInto(out *PodSecurityPolicyReviewSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.ServiceAccountNames != nil { + in, out := &in.ServiceAccountNames, &out.ServiceAccountNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewSpec. +func (in *PodSecurityPolicyReviewSpec) DeepCopy() *PodSecurityPolicyReviewSpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicyReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityPolicyReviewStatus) DeepCopyInto(out *PodSecurityPolicyReviewStatus) { + *out = *in + if in.AllowedServiceAccounts != nil { + in, out := &in.AllowedServiceAccounts, &out.AllowedServiceAccounts + *out = make([]ServiceAccountPodSecurityPolicyReviewStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewStatus. +func (in *PodSecurityPolicyReviewStatus) DeepCopy() *PodSecurityPolicyReviewStatus { + if in == nil { + return nil + } + out := new(PodSecurityPolicyReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySelfSubjectReview) DeepCopyInto(out *PodSecurityPolicySelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReview. +func (in *PodSecurityPolicySelfSubjectReview) DeepCopy() *PodSecurityPolicySelfSubjectReview { + if in == nil { + return nil + } + out := new(PodSecurityPolicySelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicySelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySelfSubjectReviewSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReviewSpec. +func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopy() *PodSecurityPolicySelfSubjectReviewSpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySelfSubjectReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySubjectReview) DeepCopyInto(out *PodSecurityPolicySubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReview. +func (in *PodSecurityPolicySubjectReview) DeepCopy() *PodSecurityPolicySubjectReview { + if in == nil { + return nil + } + out := new(PodSecurityPolicySubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicySubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityPolicySubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySubjectReviewSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewSpec. +func (in *PodSecurityPolicySubjectReviewSpec) DeepCopy() *PodSecurityPolicySubjectReviewSpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySubjectReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySubjectReviewStatus) DeepCopyInto(out *PodSecurityPolicySubjectReviewStatus) { + *out = *in + if in.AllowedBy != nil { + in, out := &in.AllowedBy, &out.AllowedBy + *out = new(corev1.ObjectReference) + **out = **in + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewStatus. +func (in *PodSecurityPolicySubjectReviewStatus) DeepCopy() *PodSecurityPolicySubjectReviewStatus { + if in == nil { + return nil + } + out := new(PodSecurityPolicySubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. +func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocationList) DeepCopyInto(out *RangeAllocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RangeAllocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocationList. +func (in *RangeAllocationList) DeepCopy() *RangeAllocationList { + if in == nil { + return nil + } + out := new(RangeAllocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(int64) + **out = **in + } + if in.UIDRangeMin != nil { + in, out := &in.UIDRangeMin, &out.UIDRangeMin + *out = new(int64) + **out = **in + } + if in.UIDRangeMax != nil { + in, out := &in.UIDRangeMax, &out.UIDRangeMax + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. +func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxContextStrategyOptions) DeepCopyInto(out *SELinuxContextStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(corev1.SELinuxOptions) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxContextStrategyOptions. +func (in *SELinuxContextStrategyOptions) DeepCopy() *SELinuxContextStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxContextStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContextConstraints) DeepCopyInto(out *SecurityContextConstraints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int32) + **out = **in + } + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + in.SELinuxContext.DeepCopyInto(&out.SELinuxContext) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SeccompProfiles != nil { + in, out := &in.SeccompProfiles, &out.SeccompProfiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if 
in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraints. +func (in *SecurityContextConstraints) DeepCopy() *SecurityContextConstraints { + if in == nil { + return nil + } + out := new(SecurityContextConstraints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityContextConstraints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContextConstraintsList) DeepCopyInto(out *SecurityContextConstraintsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecurityContextConstraints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraintsList. +func (in *SecurityContextConstraintsList) DeepCopy() *SecurityContextConstraintsList { + if in == nil { + return nil + } + out := new(SecurityContextConstraintsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityContextConstraintsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopyInto(out *ServiceAccountPodSecurityPolicyReviewStatus) { + *out = *in + in.PodSecurityPolicySubjectReviewStatus.DeepCopyInto(&out.PodSecurityPolicySubjectReviewStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountPodSecurityPolicyReviewStatus. +func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopy() *ServiceAccountPodSecurityPolicyReviewStatus { + if in == nil { + return nil + } + out := new(ServiceAccountPodSecurityPolicyReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
+func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000000..178c97078011a --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,60 @@ +securitycontextconstraints.security.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: securitycontextconstraints.security.openshift.io + Capability: "" + Category: "" + FeatureGates: + - UserNamespacesPodSecurityStandards + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: security.openshift.io + HasStatus: false + KindName: SecurityContextConstraints + Labels: {} + PluralName: securitycontextconstraints + PrinterColumns: + - description: Determines if a container can request to be run as privileged + jsonPath: .allowPrivilegedContainer + name: Priv + type: string + - description: A list of capabilities that can be requested to add to the container + jsonPath: .allowedCapabilities + name: Caps + type: string + - description: Strategy that will dictate what labels will be set in the SecurityContext + jsonPath: .seLinuxContext.type + name: SELinux + type: string + - description: Strategy that will dictate what RunAsUser is used in the SecurityContext + jsonPath: .runAsUser.type + name: RunAsUser + type: string + - description: Strategy that will dictate what fs group is used by the SecurityContext + jsonPath: .fsGroup.type + name: FSGroup + type: string + - description: Strategy that will dictate what supplemental groups are used by the + SecurityContext + jsonPath: .supplementalGroups.type + name: SupGroup + type: string + - description: Sort order of SCCs + jsonPath: .priority + name: Priority + type: string + - description: Force containers to run with a read only root file system + jsonPath: .readOnlyRootFilesystem + name: ReadOnlyRootFS + type: string + - description: White list of allowed volume plugins + jsonPath: .volumes + name: Volumes + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..29cddf7e64727 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,232 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
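+//
+// Editorial note: each generated SwaggerDoc method simply returns a map from
+// JSON field name to its description, with the empty key holding the
+// type-level documentation. A minimal, hypothetical consumer (assuming
+// "fmt" is imported) looks like:
+//
+//	docs := (SecurityContextConstraints{}).SwaggerDoc()
+//	fmt.Println(docs["priority"]) // prints the priority field's description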
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "driver is the name of the Flexvolume driver.", +} + +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "type": "type is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_IDRange = map[string]string{ + "": "IDRange provides a min/max of an allowed range of IDs.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_PodSecurityPolicyReview = map[string]string{ + "": "PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the PodSecurityPolicy to check.", + "status": "status represents the current information/status for the PodSecurityPolicyReview.", +} + +func (PodSecurityPolicyReview) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyReview +} + +var map_PodSecurityPolicyReviewSpec = map[string]string{ + "": "PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview", + "template": "template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, in which case \"default\" is used. If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.", + "serviceAccountNames": "serviceAccountNames is an optional set of ServiceAccounts to run the check with. If serviceAccountNames is empty, the template.spec.serviceAccountName is used, unless it's empty, in which case \"default\" is used instead. 
If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.", +} + +func (PodSecurityPolicyReviewSpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyReviewSpec +} + +var map_PodSecurityPolicyReviewStatus = map[string]string{ + "": "PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.", + "allowedServiceAccounts": "allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.", +} + +func (PodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyReviewStatus +} + +var map_PodSecurityPolicySelfSubjectReview = map[string]string{ + "": "PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the specification for the PodSecurityPolicySelfSubjectReview.", + "status": "status represents the current information/status for the PodSecurityPolicySelfSubjectReview.", +} + +func (PodSecurityPolicySelfSubjectReview) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySelfSubjectReview +} + +var map_PodSecurityPolicySelfSubjectReviewSpec = map[string]string{ + "": "PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.", + "template": "template is the PodTemplateSpec to check.", +} + +func (PodSecurityPolicySelfSubjectReviewSpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySelfSubjectReviewSpec +} + +var map_PodSecurityPolicySubjectReview = map[string]string{ + "": "PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the specification for the PodSecurityPolicySubjectReview.", + "status": "status represents the current information/status for the PodSecurityPolicySubjectReview.", +} + +func (PodSecurityPolicySubjectReview) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySubjectReview +} + +var map_PodSecurityPolicySubjectReviewSpec = map[string]string{ + "": "PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview", + "template": "template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. If it's non-empty, it will be checked.", + "user": "user is the user you're testing for. If you specify \"user\" but not \"groups\", then it is interpreted as \"What if user were not a member of any groups?\".
If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.", + "groups": "groups is the groups you're testing for.", +} + +func (PodSecurityPolicySubjectReviewSpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySubjectReviewSpec +} + +var map_PodSecurityPolicySubjectReviewStatus = map[string]string{ + "": "PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.", + "allowedBy": "allowedBy is a reference to the rule that allows the PodTemplateSpec. A rule can be a SecurityContextConstraint or a PodSecurityPolicy. A `nil` value indicates that it was denied.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available.", + "template": "template is the PodTemplateSpec after the defaulting is applied.", +} + +func (PodSecurityPolicySubjectReviewStatus) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySubjectReviewStatus +} + +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is used so we can easily expose a RangeAllocation typed for the security API group.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "range": "range is a string representing a unique label for a range of uids, \"1000000000-2000000000/10000\".", + "data": "data is a byte array representing the serialized state of a range allocation. It is a bitmap with each bit set to one to represent a range is taken.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_RangeAllocationList = map[string]string{ + "": "RangeAllocationList is a list of RangeAllocation objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "List of RangeAllocations.", +} + +func (RangeAllocationList) SwaggerDoc() map[string]string { + return map_RangeAllocationList +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", + "type": "type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "uid": "uid is the user id that containers must run as.
Required for the MustRunAs strategy if not using namespace/service account allocated uids.", + "uidRangeMin": "uidRangeMin defines the min value for a strategy that allocates by range.", + "uidRangeMax": "uidRangeMax defines the max value for a strategy that allocates by range.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_SELinuxContextStrategyOptions = map[string]string{ + "": "SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.", + "type": "type is the strategy that will dictate what SELinux context is used in the SecurityContext.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs", +} + +func (SELinuxContextStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxContextStrategyOptions +} + +var map_SecurityContextConstraints = map[string]string{ + "": "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "priority": "priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher the priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", + "allowPrivilegedContainer": "allowPrivilegedContainer determines if a container can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", + "allowHostDirVolumePlugin": "allowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin", + "volumes": "volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used.
This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", + "allowHostNetwork": "allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "allowHostPorts": "allowHostPorts determines if the policy allows host ports in the containers.", + "allowHostPID": "allowHostPID determines if the policy allows host pid in the containers.", + "allowHostIPC": "allowHostIPC determines if the policy allows host ipc in the containers.", + "userNamespaceLevel": "userNamespaceLevel determines if the policy allows host users in containers. Valid values are \"AllowHostLevel\", \"RequirePodLevel\", and omitted. When \"AllowHostLevel\" is set, a pod author may set `hostUsers` to either `true` or `false`. When \"RequirePodLevel\" is set, a pod author must set `hostUsers` to `false`. When omitted, the default value is \"AllowHostLevel\".", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "seLinuxContext": "seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", + "runAsUser": "runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "users": "The users who have permission to use these security context constraints", + "groups": "The groups that have permission to use these security context constraints", + "seccompProfiles": "seccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g.
\"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", +} + +func (SecurityContextConstraints) SwaggerDoc() map[string]string { + return map_SecurityContextConstraints +} + +var map_SecurityContextConstraintsList = map[string]string{ + "": "SecurityContextConstraintsList is a list of SecurityContextConstraints objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "List of security context constraints.", +} + +func (SecurityContextConstraintsList) SwaggerDoc() map[string]string { + return map_SecurityContextConstraintsList +} + +var map_ServiceAccountPodSecurityPolicyReviewStatus = map[string]string{ + "": "ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status", + "name": "name contains the allowed and the denied ServiceAccount name", +} + +func (ServiceAccountPodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { + return map_ServiceAccountPodSecurityPolicyReviewStatus +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "type": "type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/template/v1/codec.go b/vendor/github.com/openshift/api/template/v1/codec.go new file mode 100644 index 0000000000000..9e9177ed6a7da --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/codec.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/api/pkg/serialization" +) + +var _ runtime.NestedObjectDecoder = &Template{} +var _ runtime.NestedObjectEncoder = &Template{} + +// DecodeNestedObjects decodes the object as a runtime.Unknown with JSON content. +func (c *Template) DecodeNestedObjects(d runtime.Decoder) error { + for i := range c.Objects { + if c.Objects[i].Object != nil { + continue + } + c.Objects[i].Object = &runtime.Unknown{ + ContentType: "application/json", + Raw: c.Objects[i].Raw, + } + } + return nil +} +func (c *Template) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Objects { + if err := serialization.EncodeNestedRawExtension(unstructured.UnstructuredJSONScheme, &c.Objects[i]); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/consts.go b/vendor/github.com/openshift/api/template/v1/consts.go new file mode 100644 index 0000000000000..cc8b49d55f681 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/consts.go @@ -0,0 +1,16 @@ +package v1 + +const ( + // TemplateInstanceFinalizer is used to clean up the objects created by the template instance, + // when the template instance is deleted. 
+ TemplateInstanceFinalizer = "template.openshift.io/finalizer" + + // TemplateInstanceOwner is a label applied to all objects created from a template instance + // which contains the uid of the template instance. + TemplateInstanceOwner = "template.openshift.io/template-instance-owner" + + // WaitForReadyAnnotation indicates that the TemplateInstance controller + // should wait for the object to be ready before reporting the template + // instantiation complete. + WaitForReadyAnnotation = "template.alpha.openshift.io/wait-for-ready" +) diff --git a/vendor/github.com/openshift/api/template/v1/doc.go b/vendor/github.com/openshift/api/template/v1/doc.go new file mode 100644 index 0000000000000..34f9f8d455456 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/template/apis/template +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=template.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/template/v1/generated.pb.go b/vendor/github.com/openshift/api/template/v1/generated.pb.go new file mode 100644 index 0000000000000..df724d89d451a --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/generated.pb.go @@ -0,0 +1,4115 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/template/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *BrokerTemplateInstance) Reset() { *m = BrokerTemplateInstance{} } +func (*BrokerTemplateInstance) ProtoMessage() {} +func (*BrokerTemplateInstance) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{0} +} +func (m *BrokerTemplateInstance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstance) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstance.Merge(m, src) +} +func (m *BrokerTemplateInstance) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstance) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstance.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstance proto.InternalMessageInfo + +func (m *BrokerTemplateInstanceList) Reset() { *m = BrokerTemplateInstanceList{} } +func (*BrokerTemplateInstanceList) ProtoMessage() {} +func (*BrokerTemplateInstanceList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{1} +} +func (m *BrokerTemplateInstanceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstanceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstanceList.Merge(m, src) +} +func (m *BrokerTemplateInstanceList) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstanceList) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstanceList.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstanceList proto.InternalMessageInfo + +func (m *BrokerTemplateInstanceSpec) Reset() { *m = BrokerTemplateInstanceSpec{} } +func (*BrokerTemplateInstanceSpec) ProtoMessage() {} +func (*BrokerTemplateInstanceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{2} +} +func (m *BrokerTemplateInstanceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstanceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstanceSpec.Merge(m, src) +} +func (m *BrokerTemplateInstanceSpec) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstanceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstanceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstanceSpec proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{3} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, 
src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *Parameter) Reset() { *m = Parameter{} } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{4} +} +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return m.Size() +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo + +func (m *Template) Reset() { *m = Template{} } +func (*Template) ProtoMessage() {} +func (*Template) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{5} +} +func (m *Template) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Template) XXX_Merge(src proto.Message) { + xxx_messageInfo_Template.Merge(m, src) +} +func (m *Template) XXX_Size() int { + return m.Size() +} +func (m *Template) XXX_DiscardUnknown() { + xxx_messageInfo_Template.DiscardUnknown(m) +} + +var xxx_messageInfo_Template proto.InternalMessageInfo + +func (m *TemplateInstance) Reset() { *m = TemplateInstance{} } +func (*TemplateInstance) ProtoMessage() {} +func (*TemplateInstance) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{6} +} +func (m *TemplateInstance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstance) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstance.Merge(m, src) +} +func (m *TemplateInstance) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstance) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstance.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstance proto.InternalMessageInfo + +func (m *TemplateInstanceCondition) Reset() { *m = TemplateInstanceCondition{} } +func (*TemplateInstanceCondition) ProtoMessage() {} +func (*TemplateInstanceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{7} +} +func (m *TemplateInstanceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceCondition.Merge(m, src) +} +func (m *TemplateInstanceCondition) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceCondition.DiscardUnknown(m) +} + +var 
xxx_messageInfo_TemplateInstanceCondition proto.InternalMessageInfo + +func (m *TemplateInstanceList) Reset() { *m = TemplateInstanceList{} } +func (*TemplateInstanceList) ProtoMessage() {} +func (*TemplateInstanceList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{8} +} +func (m *TemplateInstanceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceList.Merge(m, src) +} +func (m *TemplateInstanceList) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceList) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceList.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceList proto.InternalMessageInfo + +func (m *TemplateInstanceObject) Reset() { *m = TemplateInstanceObject{} } +func (*TemplateInstanceObject) ProtoMessage() {} +func (*TemplateInstanceObject) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{9} +} +func (m *TemplateInstanceObject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceObject.Merge(m, src) +} +func (m *TemplateInstanceObject) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceObject) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceObject.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceObject proto.InternalMessageInfo + +func (m *TemplateInstanceRequester) Reset() { *m = TemplateInstanceRequester{} } +func (*TemplateInstanceRequester) ProtoMessage() {} +func (*TemplateInstanceRequester) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{10} +} +func (m *TemplateInstanceRequester) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceRequester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceRequester) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceRequester.Merge(m, src) +} +func (m *TemplateInstanceRequester) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceRequester) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceRequester.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceRequester proto.InternalMessageInfo + +func (m *TemplateInstanceSpec) Reset() { *m = TemplateInstanceSpec{} } +func (*TemplateInstanceSpec) ProtoMessage() {} +func (*TemplateInstanceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{11} +} +func (m *TemplateInstanceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceSpec.Merge(m, src) +} +func 
(m *TemplateInstanceSpec) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceSpec proto.InternalMessageInfo + +func (m *TemplateInstanceStatus) Reset() { *m = TemplateInstanceStatus{} } +func (*TemplateInstanceStatus) ProtoMessage() {} +func (*TemplateInstanceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{12} +} +func (m *TemplateInstanceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceStatus.Merge(m, src) +} +func (m *TemplateInstanceStatus) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceStatus proto.InternalMessageInfo + +func (m *TemplateList) Reset() { *m = TemplateList{} } +func (*TemplateList) ProtoMessage() {} +func (*TemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{13} +} +func (m *TemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateList.Merge(m, src) +} +func (m *TemplateList) XXX_Size() int { + return m.Size() +} +func (m *TemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BrokerTemplateInstance)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstance") + proto.RegisterType((*BrokerTemplateInstanceList)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceList") + proto.RegisterType((*BrokerTemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceSpec") + proto.RegisterType((*ExtraValue)(nil), "github.com.openshift.api.template.v1.ExtraValue") + proto.RegisterType((*Parameter)(nil), "github.com.openshift.api.template.v1.Parameter") + proto.RegisterType((*Template)(nil), "github.com.openshift.api.template.v1.Template") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.template.v1.Template.LabelsEntry") + proto.RegisterType((*TemplateInstance)(nil), "github.com.openshift.api.template.v1.TemplateInstance") + proto.RegisterType((*TemplateInstanceCondition)(nil), "github.com.openshift.api.template.v1.TemplateInstanceCondition") + proto.RegisterType((*TemplateInstanceList)(nil), "github.com.openshift.api.template.v1.TemplateInstanceList") + proto.RegisterType((*TemplateInstanceObject)(nil), "github.com.openshift.api.template.v1.TemplateInstanceObject") + proto.RegisterType((*TemplateInstanceRequester)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester") + proto.RegisterMapType((map[string]ExtraValue)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester.ExtraEntry") + proto.RegisterType((*TemplateInstanceSpec)(nil), 
"github.com.openshift.api.template.v1.TemplateInstanceSpec") + proto.RegisterType((*TemplateInstanceStatus)(nil), "github.com.openshift.api.template.v1.TemplateInstanceStatus") + proto.RegisterType((*TemplateList)(nil), "github.com.openshift.api.template.v1.TemplateList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/template/v1/generated.proto", fileDescriptor_8d3ee9f55fa8363e) +} + +var fileDescriptor_8d3ee9f55fa8363e = []byte{ + // 1246 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x5b, 0x45, + 0x17, 0xf6, 0xf5, 0x57, 0xec, 0x71, 0xdb, 0x37, 0x9a, 0xb7, 0xaa, 0x2e, 0x96, 0x6a, 0x5b, 0xb7, + 0x15, 0x0a, 0xa8, 0xb9, 0x26, 0x51, 0x28, 0x25, 0x42, 0x02, 0x2e, 0x49, 0xab, 0x94, 0x14, 0xd0, + 0x24, 0x45, 0x08, 0xb2, 0x60, 0x7c, 0x3d, 0x76, 0x6e, 0xe3, 0xfb, 0xc1, 0xcc, 0x38, 0xd4, 0xbb, + 0x2e, 0xf8, 0x01, 0x2c, 0x59, 0xf2, 0x13, 0x58, 0xb2, 0x42, 0x62, 0x97, 0x65, 0xd9, 0x75, 0x01, + 0x16, 0x31, 0x2b, 0xfe, 0x00, 0x48, 0x65, 0x83, 0x66, 0xee, 0xdc, 0x0f, 0x7f, 0x51, 0x27, 0x95, + 0xda, 0x9d, 0xef, 0x99, 0xf3, 0x3c, 0x67, 0xce, 0x99, 0x33, 0xcf, 0x1c, 0x83, 0x8d, 0xae, 0xc3, + 0x0f, 0xfb, 0x2d, 0xd3, 0xf6, 0xdd, 0xa6, 0x1f, 0x10, 0x8f, 0x1d, 0x3a, 0x1d, 0xde, 0xc4, 0x81, + 0xd3, 0xe4, 0xc4, 0x0d, 0x7a, 0x98, 0x93, 0xe6, 0xf1, 0x5a, 0xb3, 0x4b, 0x3c, 0x42, 0x31, 0x27, + 0x6d, 0x33, 0xa0, 0x3e, 0xf7, 0xe1, 0xf5, 0x04, 0x65, 0xc6, 0x28, 0x13, 0x07, 0x8e, 0x19, 0xa1, + 0xcc, 0xe3, 0xb5, 0xea, 0x6a, 0x8a, 0xbb, 0xeb, 0x77, 0xfd, 0xa6, 0x04, 0xb7, 0xfa, 0x1d, 0xf9, + 0x25, 0x3f, 0xe4, 0xaf, 0x90, 0xb4, 0x6a, 0x1c, 0xdd, 0x62, 0xa6, 0xe3, 0xcb, 0xe0, 0xb6, 0x4f, + 0x67, 0x05, 0xae, 0x6e, 0x24, 0x3e, 0x2e, 0xb6, 0x0f, 0x1d, 0x8f, 0xd0, 0x41, 0x33, 0x38, 0xea, + 0x0a, 0x03, 0x6b, 0xba, 0x84, 0xe3, 0x59, 0xa8, 0xe6, 0x3c, 0x14, 0xed, 0x7b, 0xdc, 0x71, 0xc9, + 0x14, 0xe0, 0xe6, 0xb3, 0x00, 0xcc, 0x3e, 0x24, 0x2e, 0x9e, 0xc4, 0x19, 0x43, 0x0d, 0x5c, 0xb1, + 0xa8, 0x7f, 0x44, 0xe8, 0xbe, 0xaa, 0xc3, 0x8e, 0xc7, 0x38, 0xf6, 0x6c, 0x02, 0xbf, 0x04, 0x25, + 0xb1, 0xbd, 0x36, 0xe6, 0x58, 0xd7, 0x1a, 0xda, 0x4a, 0x65, 0xfd, 0x0d, 0x33, 0x8c, 0x62, 0xa6, + 0xa3, 0x98, 0xc1, 0x51, 0x57, 0x18, 0x98, 0x29, 0xbc, 0xcd, 0xe3, 0x35, 0xf3, 0xe3, 0xd6, 0x03, + 0x62, 0xf3, 0x7b, 0x84, 0x63, 0x0b, 0x9e, 0x0c, 0xeb, 0x99, 0xd1, 0xb0, 0x0e, 0x12, 0x1b, 0x8a, + 0x59, 0x61, 0x0b, 0xe4, 0x59, 0x40, 0x6c, 0x3d, 0x2b, 0xd9, 0xdf, 0x33, 0x17, 0x39, 0x23, 0x73, + 0xf6, 0x6e, 0xf7, 0x02, 0x62, 0x5b, 0x17, 0x54, 0xb4, 0xbc, 0xf8, 0x42, 0x92, 0xdb, 0xf8, 0x4d, + 0x03, 0xd5, 0xd9, 0x90, 0x5d, 0x87, 0x71, 0x78, 0x30, 0x95, 0xa4, 0xb9, 0x58, 0x92, 0x02, 0x2d, + 0x53, 0x5c, 0x56, 0x41, 0x4b, 0x91, 0x25, 0x95, 0x20, 0x06, 0x05, 0x87, 0x13, 0x97, 0xe9, 0xd9, + 0x46, 0x6e, 0xa5, 0xb2, 0xfe, 0xce, 0xf3, 0x64, 0x68, 0x5d, 0x54, 0x81, 0x0a, 0x3b, 0x82, 0x12, + 0x85, 0xcc, 0xc6, 0x37, 0xd9, 0x79, 0xf9, 0x89, 0x22, 0x40, 0x07, 0x2c, 0xf3, 0x09, 0xbb, 0xca, + 0xf3, 0x5a, 0x2a, 0x4f, 0x53, 0x74, 0x6f, 0x72, 0x74, 0x88, 0x74, 0x08, 0x25, 0x22, 0xa6, 0xae, + 0x62, 0x2e, 0x4f, 0x92, 0xa3, 0x29, 0x5a, 0xf8, 0x21, 0x28, 0x32, 0x62, 0x53, 0xc2, 0xd5, 0x79, + 0x2e, 0x14, 0xe0, 0x92, 0x0a, 0x50, 0xdc, 0x93, 0x50, 0xa4, 0x28, 0xa0, 0x09, 0x40, 0xcb, 0xf1, + 0xda, 0x8e, 0xd7, 0xdd, 0xd9, 0x62, 0x7a, 0xae, 0x91, 0x5b, 0x29, 0x5b, 0x97, 0x44, 0x23, 0x59, + 0xb1, 0x15, 0xa5, 0x3c, 0x8c, 0xb7, 0x00, 0xd8, 0x7e, 0xc8, 0x29, 0xfe, 0x14, 0xf7, 0xfa, 0x04, + 0xd6, 0xa3, 0xba, 0x6b, 0x12, 0x58, 0x9e, 0xac, 0xda, 0x66, 0xe9, 0xbb, 0xef, 0xeb, 0x99, 0x47, + 0xbf, 
0x36, 0x32, 0xc6, 0x4f, 0x59, 0x50, 0xfe, 0x04, 0x53, 0xec, 0x12, 0x4e, 0x28, 0x6c, 0x80, + 0xbc, 0x87, 0xdd, 0xb0, 0x44, 0xe5, 0xa4, 0x9f, 0x3e, 0xc2, 0x2e, 0x41, 0x72, 0x05, 0xbe, 0x09, + 0x2a, 0x6d, 0x87, 0x05, 0x3d, 0x3c, 0x10, 0x46, 0x99, 0x6a, 0xd9, 0xfa, 0xbf, 0x72, 0xac, 0x6c, + 0x25, 0x4b, 0x28, 0xed, 0x27, 0x61, 0x84, 0xd9, 0xd4, 0x09, 0xb8, 0xe3, 0x7b, 0x7a, 0x6e, 0x02, + 0x96, 0x2c, 0xa1, 0xb4, 0x1f, 0xbc, 0x06, 0x0a, 0xc7, 0x22, 0x23, 0x3d, 0x2f, 0x01, 0x71, 0x0b, + 0xc8, 0x34, 0x51, 0xb8, 0x06, 0x6f, 0x80, 0x52, 0x74, 0xad, 0xf5, 0x82, 0xf4, 0x8b, 0x7b, 0xf2, + 0x8e, 0xb2, 0xa3, 0xd8, 0x43, 0xa4, 0xd8, 0xa1, 0xbe, 0xab, 0x17, 0xc7, 0x53, 0xbc, 0x4d, 0x7d, + 0x17, 0xc9, 0x15, 0xc1, 0x47, 0xc9, 0x57, 0x7d, 0x87, 0x92, 0xb6, 0xbe, 0xd4, 0xd0, 0x56, 0x4a, + 0x09, 0x1f, 0x52, 0x76, 0x14, 0x7b, 0x18, 0xff, 0xe4, 0x40, 0x29, 0xea, 0x8e, 0x17, 0xa0, 0x19, + 0xaf, 0x81, 0x25, 0x97, 0x30, 0x86, 0xbb, 0x51, 0xed, 0xff, 0xa7, 0xdc, 0x97, 0xee, 0x85, 0x66, + 0x14, 0xad, 0xc3, 0xcf, 0xc0, 0x92, 0x2f, 0x29, 0xc2, 0x06, 0xaa, 0xac, 0xaf, 0xce, 0xdd, 0x8b, + 0x52, 0x49, 0x13, 0xe1, 0xaf, 0xb7, 0x1f, 0x72, 0xe2, 0x31, 0xc7, 0xf7, 0x12, 0xe6, 0x70, 0x23, + 0x0c, 0x45, 0x74, 0xd0, 0x06, 0x20, 0x88, 0x7a, 0x86, 0xe9, 0x79, 0x49, 0xde, 0x5c, 0xec, 0x72, + 0xc7, 0xbd, 0x96, 0xe4, 0x19, 0x9b, 0x18, 0x4a, 0xd1, 0xc2, 0x43, 0x50, 0xec, 0xe1, 0x16, 0xe9, + 0x31, 0xbd, 0x20, 0x03, 0x6c, 0x2e, 0x16, 0x20, 0x3a, 0x0b, 0x73, 0x57, 0x82, 0xb7, 0x3d, 0x4e, + 0x07, 0xd6, 0x65, 0x15, 0xeb, 0x42, 0x98, 0x4a, 0xb8, 0x84, 0x14, 0x7f, 0xf5, 0x6d, 0x50, 0x49, + 0x39, 0xc3, 0x65, 0x90, 0x3b, 0x22, 0x83, 0xf0, 0x0e, 0x20, 0xf1, 0x13, 0x5e, 0x8e, 0xda, 0x50, + 0x96, 0x5c, 0xf5, 0xdd, 0x66, 0xf6, 0x96, 0x66, 0xfc, 0x98, 0x05, 0xcb, 0x2f, 0xe1, 0xe5, 0x38, + 0x18, 0x7b, 0x39, 0xce, 0x58, 0x99, 0x67, 0xbd, 0x19, 0xb0, 0x0d, 0x8a, 0x8c, 0x63, 0xde, 0x67, + 0xf2, 0x9e, 0x2e, 0xac, 0xdb, 0x53, 0xfc, 0x92, 0x23, 0x25, 0x71, 0xf2, 0x1b, 0x29, 0x6e, 0xe3, + 0xef, 0x2c, 0x78, 0x65, 0x12, 0xf2, 0x81, 0xef, 0xb5, 0x1d, 0x79, 0xf3, 0xdf, 0x07, 0x79, 0x3e, + 0x08, 0x22, 0x25, 0x5a, 0x8d, 0x76, 0xb9, 0x3f, 0x08, 0xc8, 0xd3, 0x61, 0xfd, 0xea, 0x5c, 0xa0, + 0x70, 0x40, 0x12, 0x0a, 0x77, 0xe3, 0x34, 0xc2, 0x9b, 0xb2, 0x31, 0xbe, 0x91, 0xa7, 0xc3, 0xfa, + 0x8c, 0x01, 0xc6, 0x8c, 0x99, 0xc6, 0xb7, 0x0b, 0x8f, 0x01, 0xec, 0x61, 0xc6, 0xf7, 0x29, 0xf6, + 0x58, 0x18, 0xc9, 0x71, 0x89, 0x2a, 0xd0, 0xeb, 0x8b, 0x1d, 0xaf, 0x40, 0x58, 0x55, 0xb5, 0x0b, + 0xb8, 0x3b, 0xc5, 0x86, 0x66, 0x44, 0x80, 0xaf, 0x82, 0x22, 0x25, 0x98, 0xf9, 0x9e, 0xd2, 0xc0, + 0xb8, 0x9c, 0x48, 0x5a, 0x91, 0x5a, 0x4d, 0x0b, 0x43, 0xe1, 0xbf, 0x85, 0xc1, 0xf8, 0x45, 0x03, + 0x97, 0x5f, 0xc2, 0x34, 0xf0, 0xc5, 0xf8, 0x34, 0x70, 0xf3, 0x7c, 0x5d, 0x35, 0x67, 0x0e, 0x38, + 0x00, 0x57, 0x26, 0x3d, 0xc3, 0x9b, 0x03, 0x2d, 0x90, 0xa3, 0xa4, 0x73, 0x96, 0x57, 0xbf, 0xa2, + 0x22, 0xe4, 0x10, 0xe9, 0x20, 0x01, 0x36, 0xfe, 0x9c, 0xd1, 0xab, 0xe2, 0x2d, 0x20, 0x4c, 0xbc, + 0x9a, 0x37, 0x40, 0xa9, 0xcf, 0x08, 0x4d, 0xbd, 0x9c, 0x71, 0x19, 0xee, 0x2b, 0x3b, 0x8a, 0x3d, + 0xe0, 0x55, 0x90, 0xeb, 0x3b, 0x6d, 0xd5, 0x93, 0x71, 0xa8, 0xfb, 0x3b, 0x5b, 0x48, 0xd8, 0xa1, + 0x01, 0x8a, 0x5d, 0xea, 0xf7, 0x83, 0xe8, 0xd5, 0x07, 0xe2, 0xac, 0xef, 0x48, 0x0b, 0x52, 0x2b, + 0xd0, 0x07, 0x05, 0x22, 0x5e, 0x7b, 0x25, 0xbd, 0x77, 0xcf, 0x57, 0xc9, 0x38, 0x01, 0x53, 0x8e, + 0x0e, 0xa1, 0x52, 0xc6, 0xd5, 0x95, 0x36, 0x14, 0xc6, 0xa9, 0x3e, 0x50, 0xe3, 0xc5, 0x3c, 0x81, + 0xbc, 0x9d, 0x16, 0x48, 0x21, 0x77, 0x0b, 0x6d, 0x28, 0x99, 0x58, 0xd2, 0x92, 0xfa, 0x43, 0x76, + 0xba, 0x3b, 0xe5, 0x2c, 0x77, 
0x00, 0x4a, 0x11, 0x3a, 0xee, 0xce, 0x33, 0x25, 0x9e, 0x1c, 0x4b, + 0x64, 0x41, 0x31, 0xa3, 0x54, 0x8b, 0xf4, 0xf8, 0xb6, 0x32, 0xab, 0x53, 0x76, 0x7d, 0x1b, 0xf7, + 0x26, 0xdb, 0x05, 0xcc, 0x98, 0xdf, 0x7a, 0xa0, 0x4c, 0xa3, 0xf2, 0x2a, 0x91, 0x78, 0xf7, 0x39, + 0x4f, 0xc9, 0xba, 0x38, 0x1a, 0xd6, 0xcb, 0xf1, 0x27, 0x4a, 0x02, 0x18, 0x7f, 0x69, 0xd3, 0xdd, + 0x1f, 0xca, 0x17, 0x64, 0x00, 0xd8, 0x91, 0xa2, 0x85, 0xf3, 0xe0, 0xb9, 0x77, 0x12, 0x2b, 0x63, + 0xf2, 0x38, 0xc5, 0x26, 0x86, 0x52, 0x61, 0x60, 0x37, 0x99, 0x3c, 0xce, 0x34, 0xf9, 0xcf, 0xbe, + 0xc1, 0xf3, 0x07, 0x11, 0xe3, 0x67, 0x0d, 0x5c, 0x88, 0x40, 0x2f, 0x40, 0xc1, 0xf6, 0xc6, 0x15, + 0xec, 0xac, 0xed, 0x37, 0x53, 0xb9, 0xac, 0xbb, 0x27, 0xa7, 0xb5, 0xcc, 0xe3, 0xd3, 0x5a, 0xe6, + 0xc9, 0x69, 0x2d, 0xf3, 0x68, 0x54, 0xd3, 0x4e, 0x46, 0x35, 0xed, 0xf1, 0xa8, 0xa6, 0x3d, 0x19, + 0xd5, 0xb4, 0xdf, 0x47, 0x35, 0xed, 0xdb, 0x3f, 0x6a, 0x99, 0xcf, 0xaf, 0x2f, 0xf2, 0xb7, 0xff, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x61, 0xc4, 0xab, 0x1d, 0x10, 0x00, 0x00, +} + +func (m *BrokerTemplateInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BrokerTemplateInstanceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BrokerTemplateInstanceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BindingIDs) > 0 { + for iNdEx := len(m.BindingIDs) - 1; iNdEx 
>= 0; iNdEx-- { + i -= len(m.BindingIDs[iNdEx]) + copy(dAtA[i:], m.BindingIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingIDs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.TemplateInstance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Parameter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Required { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x32 + i -= len(m.Generate) + copy(dAtA[i:], m.Generate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Generate))) + i-- + dAtA[i] = 0x2a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x22 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Template) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Template) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ObjectLabels) > 0 { + keysForObjectLabels := make([]string, 0, len(m.ObjectLabels)) + for k := range m.ObjectLabels { + keysForObjectLabels = append(keysForObjectLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels) + for iNdEx := len(keysForObjectLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.ObjectLabels[string(keysForObjectLabels[iNdEx])] + baseI 
:= i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForObjectLabels[iNdEx]) + copy(dAtA[i:], keysForObjectLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForObjectLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + 
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceObject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceObject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceObject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceRequester) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceRequester) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceRequester) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Requester != nil { + { + size, err := m.Requester.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return 
base +} +func (m *BrokerTemplateInstance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BrokerTemplateInstanceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BrokerTemplateInstanceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TemplateInstance.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.BindingIDs) > 0 { + for _, s := range m.BindingIDs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Parameter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Generate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.From) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Template) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ObjectLabels) > 0 { + for k, v := range m.ObjectLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TemplateInstance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TemplateInstanceCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TemplateInstanceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TemplateInstanceObject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Ref.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*TemplateInstanceRequester) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TemplateInstanceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Requester != nil { + l = m.Requester.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TemplateInstanceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BrokerTemplateInstance) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BrokerTemplateInstance{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BrokerTemplateInstanceSpec", "BrokerTemplateInstanceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BrokerTemplateInstanceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BrokerTemplateInstance{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BrokerTemplateInstance", "BrokerTemplateInstance", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BrokerTemplateInstanceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BrokerTemplateInstanceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BrokerTemplateInstanceSpec{`, + `TemplateInstance:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TemplateInstance), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `BindingIDs:` + fmt.Sprintf("%v", this.BindingIDs) + `,`, + `}`, + }, "") + return s +} +func (this *Parameter) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Parameter{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Generate:` + fmt.Sprintf("%v", this.Generate) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `Required:` + fmt.Sprintf("%v", this.Required) + `,`, + `}`, + }, "") + return s +} +func (this *Template) String() string { + if this == nil { + return "nil" + } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" + repeatedStringForParameters := "[]Parameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + keysForObjectLabels := make([]string, 0, len(this.ObjectLabels)) + for k := range this.ObjectLabels { + keysForObjectLabels = append(keysForObjectLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels) + mapStringForObjectLabels := "map[string]string{" + for _, k := range keysForObjectLabels { + mapStringForObjectLabels += fmt.Sprintf("%v: %v,", k, this.ObjectLabels[k]) + } + mapStringForObjectLabels += "}" + s := strings.Join([]string{`&Template{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `Parameters:` + repeatedStringForParameters + `,`, + `ObjectLabels:` + mapStringForObjectLabels + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstance) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstance{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TemplateInstanceSpec", "TemplateInstanceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TemplateInstanceStatus", "TemplateInstanceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstanceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]TemplateInstance{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TemplateInstance", "TemplateInstance", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&TemplateInstanceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this 
*TemplateInstanceObject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstanceObject{`, + `Ref:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ref), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceRequester) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&TemplateInstanceRequester{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstanceSpec{`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "Template", "Template", 1), `&`, ``, 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Requester:` + strings.Replace(this.Requester.String(), "TemplateInstanceRequester", "TemplateInstanceRequester", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]TemplateInstanceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TemplateInstanceCondition", "TemplateInstanceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForObjects := "[]TemplateInstanceObject{" + for _, f := range this.Objects { + repeatedStringForObjects += strings.Replace(strings.Replace(f.String(), "TemplateInstanceObject", "TemplateInstanceObject", 1), `&`, ``, 1) + "," + } + repeatedStringForObjects += "}" + s := strings.Join([]string{`&TemplateInstanceStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `}`, + }, "") + return s +} +func (this *TemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Template{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Template", "Template", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&TemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BrokerTemplateInstance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BrokerTemplateInstance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BrokerTemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BrokerTemplateInstanceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BrokerTemplateInstanceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BrokerTemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BrokerTemplateInstance{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BrokerTemplateInstanceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BrokerTemplateInstanceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BrokerTemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateInstance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TemplateInstance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingIDs = append(m.BindingIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Parameter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Parameter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Generate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
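Every field decode in the generated code above begins with the same hand-inlined pattern: read a base-128 varint (seven payload bits per byte; a set high bit means another byte follows), split it into fieldNum (wire >> 3) and wireType (wire & 0x7), then branch. A minimal standalone sketch of that varint read, with illustrative names, not part of the vendored file:

package main

import (
	"fmt"
	"io"
)

// decodeVarint mirrors the loops inlined throughout generated.pb.go:
// little-endian base-128, 7 payload bits per byte, a clear high bit
// terminates the value.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 0x96 0x01 decodes as 0x16 | 0x01<<7 = 150, consuming 2 bytes.
	v, n, _ := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n)
}

The generator inlines this loop at every field boundary rather than calling a helper, keeping the hot path free of function calls and allocations.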
m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Required = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Template) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Template: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Template: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectLabels == nil { + m.ObjectLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
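The ObjectLabels case this hunk walks through shows how a protobuf map field travels on the wire: each map entry is one embedded message in which the key is field 1 and the value is field 2, and the inner loop above decodes one entry's halves before the pair is stored. Hand-assembled bytes for a single {"app": "demo"} entry (illustrative, not taken from the source):

package main

import "fmt"

func main() {
	// One map entry: an embedded message, key = field 1, value = field 2.
	entry := []byte{
		0x0A, 0x03, 'a', 'p', 'p', // tag 1<<3|2, length 3, "app"
		0x12, 0x04, 'd', 'e', 'm', 'o', // tag 2<<3|2, length 4, "demo"
	}
	// The enclosing Template field is labels = 5 with wire type 2,
	// so the outer tag byte is 5<<3|2 = 0x2a, followed by the entry length.
	field := append([]byte{0x2A, byte(len(entry))}, entry...)
	fmt.Printf("% x\n", field)
}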
m.ObjectLabels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TemplateInstanceConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, TemplateInstance{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceObject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceObject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceObject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceRequester) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceRequester: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceRequester: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex 
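The Extra map decoded just above differs from ObjectLabels in that its values are ExtraValue messages rather than strings, so the generated code allocates a fresh &ExtraValue{} per entry and recurses into its Unmarshal before storing the dereferenced value. A round-trip sketch of the resulting public surface; it assumes the matching generated Marshal methods that go-to-protobuf emits alongside these Unmarshal methods (not shown in this hunk):

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
)

func main() {
	in := templatev1.TemplateInstanceRequester{
		Username: "alice",
		Groups:   []string{"dev"},
		Extra:    map[string]templatev1.ExtraValue{"scopes": {"user:full"}},
	}
	raw, err := in.Marshal() // generated counterpart of Unmarshal
	if err != nil {
		panic(err)
	}
	var out templatev1.TemplateInstanceRequester
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Username, out.Extra["scopes"]) // alice [user:full]
}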
+ default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &v11.LocalObjectReference{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requester", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requester == nil { + m.Requester = &TemplateInstanceRequester{} + } + if err := m.Requester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *TemplateInstanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TemplateInstanceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, TemplateInstanceObject{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Template{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto new file mode 100644 index 0000000000000..8f27eb48a04c7 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -0,0 +1,262 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
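The Go decoder closes with skipGenerated, which steps over unknown fields by wire type (varint, fixed64, length-delimited, nested groups, fixed32) so that decoders built against an older schema tolerate fields added by newer API versions. The generated.proto that follows is the schema those field numbers and wire types come from; the most behavior-rich part of it is Parameter's "expression" generator. A short illustrative use of that field from Go (the random value is produced at template instantiation, not by building the struct):

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
)

func main() {
	// At instantiation, ${PASSWORD} in the template's objects is replaced
	// by a random 8-character alphanumeric string, per the generate/from
	// contract documented in the schema below.
	t := templatev1.Template{
		Parameters: []templatev1.Parameter{{
			Name:     "PASSWORD",
			Generate: "expression",
			From:     "[a-zA-Z0-9]{8}",
			Required: true,
		}},
	}
	fmt.Println(t.Parameters[0].Name)
}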
+ +syntax = "proto2"; + +package github.com.openshift.api.template.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/template/v1"; + +// BrokerTemplateInstance holds the service broker-related state associated with +// a TemplateInstance. BrokerTemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BrokerTemplateInstance { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the state of this BrokerTemplateInstance. + optional BrokerTemplateInstanceSpec spec = 2; +} + +// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BrokerTemplateInstanceList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of BrokerTemplateInstances + repeated BrokerTemplateInstance items = 2; +} + +// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. +message BrokerTemplateInstanceSpec { + // templateInstance is a reference to a TemplateInstance object residing + // in a namespace. + optional .k8s.io.api.core.v1.ObjectReference templateInstance = 1; + + // secret is a reference to a Secret object residing in a namespace, + // containing the necessary template parameters. + optional .k8s.io.api.core.v1.ObjectReference secret = 2; + + // bindingIDs is a list of 'binding_id's provided during successive bind + // calls to the template service broker. + repeated string bindingIDs = 3; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Parameter defines a name/value variable that is to be processed during +// the Template to Config transformation. +message Parameter { + // name must be set and it can be referenced in Template + // Items using ${PARAMETER_NAME}. Required. + optional string name = 1; + + // Optional: The name that will show in UI instead of parameter 'Name' + optional string displayName = 2; + + // description of a parameter. Optional. + optional string description = 3; + + // value holds the Parameter data. If specified, the generator will be + // ignored. The value replaces all occurrences of the Parameter ${Name} + // expression during the Template to Config transformation. Optional. + optional string value = 4; + + // generate specifies the generator to be used to generate random string + // from an input value specified by From field. The result string is + // stored into Value field. 
If empty, no generator is being used, leaving + // the result Value untouched. Optional. + // + // The only supported generator is "expression", which accepts a "from" + // value in the form of a simple regular expression containing the + // range expression "[a-zA-Z0-9]", and the length expression "a{length}". + // + // Examples: + // + // from | value + // ----------------------------- + // "test[0-9]{1}x" | "test7x" + // "[0-1]{8}" | "01001100" + // "0x[A-F0-9]{4}" | "0xB3AF" + // "[a-zA-Z0-9]{8}" | "hW4yQU5i" + optional string generate = 5; + + // from is an input value for the generator. Optional. + optional string from = 6; + + // Optional: Indicates the parameter must have a value. Defaults to false. + optional bool required = 7; +} + +// Template contains the inputs needed to produce a Config. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Template { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // message is an optional instructional message that will + // be displayed when this template is instantiated. + // This field should inform the user how to utilize the newly created resources. + // Parameter substitution will be performed on the message before being + // displayed so that generated credentials and other parameters can be + // included in the output. + optional string message = 2; + + // objects is an array of resources to include in this template. + // If a namespace value is hardcoded in the object, it will be removed + // during template instantiation, however if the namespace value + // is, or contains, a ${PARAMETER_REFERENCE}, the resolved + // value after parameter substitution will be respected and the object + // will be created in that namespace. + // +kubebuilder:pruning:PreserveUnknownFields + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + + // parameters is an optional array of Parameters used during the + // Template to Config transformation. + repeated Parameter parameters = 4; + + // labels is an optional set of labels that are applied to every + // object during the Template to Config transformation. + map<string, string> labels = 5; +} + +// TemplateInstance requests and records the instantiation of a Template. +// TemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message TemplateInstance { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the desired state of this TemplateInstance. + optional TemplateInstanceSpec spec = 2; + + // status describes the current state of this TemplateInstance. + // +optional + optional TemplateInstanceStatus status = 3; +} + +// TemplateInstanceCondition contains condition information for a +// TemplateInstance. +message TemplateInstanceCondition { + // type of the condition, currently Ready or InstantiateFailure. + optional string type = 1; + + // status of the condition, one of True, False or Unknown. 
+ optional string status = 2; + + // lastTransitionTime is the last time a condition status transitioned from + // one state to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a brief machine readable explanation for the condition's last + // transition. + optional string reason = 4; + + // message is a human readable description of the details of the last + // transition, complementing reason. + optional string message = 5; +} + +// TemplateInstanceList is a list of TemplateInstance objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message TemplateInstanceList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of TemplateInstances + repeated TemplateInstance items = 2; +} + +// TemplateInstanceObject references an object created by a TemplateInstance. +message TemplateInstanceObject { + // ref is a reference to the created object. When used under .spec, only + // name and namespace are used; these can contain references to parameters + // which will be substituted following the usual rules. + optional .k8s.io.api.core.v1.ObjectReference ref = 1; +} + +// TemplateInstanceRequester holds the identity of an agent requesting a +// template instantiation. +message TemplateInstanceRequester { + // username uniquely identifies this user among all active users. + optional string username = 1; + + // uid is a unique value that identifies this user across time; if this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + optional string uid = 2; + + // groups represent the groups this user is a part of. + repeated string groups = 3; + + // extra holds additional information provided by the authenticator. + map<string, ExtraValue> extra = 4; +} + +// TemplateInstanceSpec describes the desired state of a TemplateInstance. +message TemplateInstanceSpec { + // template is a full copy of the template for instantiation. + optional Template template = 1; + + // secret is a reference to a Secret object containing the necessary + // template parameters. + optional .k8s.io.api.core.v1.LocalObjectReference secret = 2; + + // requester holds the identity of the agent requesting the template + // instantiation. + // +optional + optional TemplateInstanceRequester requester = 3; +} + +// TemplateInstanceStatus describes the current state of a TemplateInstance. +message TemplateInstanceStatus { + // conditions represent the latest available observations of a + // TemplateInstance's current state. + repeated TemplateInstanceCondition conditions = 1; + + // objects references the objects created by the TemplateInstance. + repeated TemplateInstanceObject objects = 2; +} + +// TemplateList is a list of Template objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message TemplateList { + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of templates + repeated Template items = 2; +} + diff --git a/vendor/github.com/openshift/api/template/v1/legacy.go b/vendor/github.com/openshift/api/template/v1/legacy.go new file mode 100644 index 0000000000000..9266f3ac9e69f --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/legacy.go @@ -0,0 +1,24 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Template{}, + &TemplateList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("TemplateConfig"), &Template{}) + scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("ProcessedTemplate"), &Template{}) + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/register.go b/vendor/github.com/openshift/api/template/v1/register.go new file mode 100644 index 0000000000000..e34ff5610b603 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/register.go @@ -0,0 +1,43 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "template.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Template{}, + &TemplateList{}, + &TemplateInstance{}, + &TemplateInstanceList{}, + &BrokerTemplateInstance{}, + &BrokerTemplateInstanceList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go new file mode 100644 index 0000000000000..5510b0f90b688 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -0,0 +1,294 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Template contains the inputs needed to produce a Config. 
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Template struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // message is an optional instructional message that will + // be displayed when this template is instantiated. + // This field should inform the user how to utilize the newly created resources. + // Parameter substitution will be performed on the message before being + // displayed so that generated credentials and other parameters can be + // included in the output. + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + + // objects is an array of resources to include in this template. + // If a namespace value is hardcoded in the object, it will be removed + // during template instantiation, however if the namespace value + // is, or contains, a ${PARAMETER_REFERENCE}, the resolved + // value after parameter substitution will be respected and the object + // will be created in that namespace. + // +kubebuilder:pruning:PreserveUnknownFields + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` + + // parameters is an optional array of Parameters used during the + // Template to Config transformation. + Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"` + + // labels is an optional set of labels that are applied to every + // object during the Template to Config transformation. + ObjectLabels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateList is a list of Template objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type TemplateList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of templates + Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Parameter defines a name/value variable that is to be processed during +// the Template to Config transformation. +type Parameter struct { + // name must be set and it can be referenced in Template + // Items using ${PARAMETER_NAME}. Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: The name that will show in UI instead of parameter 'Name' + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + + // description of a parameter. Optional. + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` + + // value holds the Parameter data. If specified, the generator will be + // ignored. The value replaces all occurrences of the Parameter ${Name} + // expression during the Template to Config transformation. Optional. 
+ Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"` + + // generate specifies the generator to be used to generate random string + // from an input value specified by From field. The result string is + // stored into Value field. If empty, no generator is being used, leaving + // the result Value untouched. Optional. + // + // The only supported generator is "expression", which accepts a "from" + // value in the form of a simple regular expression containing the + // range expression "[a-zA-Z0-9]", and the length expression "a{length}". + // + // Examples: + // + // from | value + // ----------------------------- + // "test[0-9]{1}x" | "test7x" + // "[0-1]{8}" | "01001100" + // "0x[A-F0-9]{4}" | "0xB3AF" + // "[a-zA-Z0-9]{8}" | "hW4yQU5i" + // + Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"` + + // from is an input value for the generator. Optional. + From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"` + + // Optional: Indicates the parameter must have a value. Defaults to false. + Required bool `json:"required,omitempty" protobuf:"varint,7,opt,name=required"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateInstance requests and records the instantiation of a Template. +// TemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type TemplateInstance struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes the desired state of this TemplateInstance. + Spec TemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status describes the current state of this TemplateInstance. + // +optional + Status TemplateInstanceStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// TemplateInstanceSpec describes the desired state of a TemplateInstance. +type TemplateInstanceSpec struct { + // template is a full copy of the template for instantiation. + Template Template `json:"template" protobuf:"bytes,1,opt,name=template"` + + // secret is a reference to a Secret object containing the necessary + // template parameters. + Secret *corev1.LocalObjectReference `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` + + // requester holds the identity of the agent requesting the template + // instantiation. + // +optional + Requester *TemplateInstanceRequester `json:"requester" protobuf:"bytes,3,opt,name=requester"` +} + +// TemplateInstanceRequester holds the identity of an agent requesting a +// template instantiation. +type TemplateInstanceRequester struct { + // username uniquely identifies this user among all active users. + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + + // uid is a unique value that identifies this user across time; if this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + + // groups represent the groups this user is a part of. 
+
+ // groups represent the groups this user is a part of.
+ Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
+
+ // extra holds additional information provided by the authenticator.
+ Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate it as a nullable type.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+// TemplateInstanceStatus describes the current state of a TemplateInstance.
+type TemplateInstanceStatus struct {
+ // conditions represent the latest available observations of a
+ // TemplateInstance's current state.
+ Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+
+ // objects references the objects created by the TemplateInstance.
+ Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"`
+}
+
+// TemplateInstanceCondition contains condition information for a
+// TemplateInstance.
+type TemplateInstanceCondition struct {
+ // type of the condition, currently Ready or InstantiateFailure.
+ Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"`
+ // status of the condition, one of True, False or Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
+ // lastTransitionTime is the last time a condition status transitioned from
+ // one state to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // reason is a brief machine readable explanation for the condition's last
+ // transition.
+ Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"`
+ // message is a human readable description of the details of the last
+ // transition, complementing reason.
+ Message string `json:"message" protobuf:"bytes,5,opt,name=message"`
+}
+
+// TemplateInstanceConditionType is the type of condition pertaining to a
+// TemplateInstance.
+type TemplateInstanceConditionType string
+
+const (
+ // TemplateInstanceReady indicates the readiness of the template
+ // instantiation.
+ TemplateInstanceReady TemplateInstanceConditionType = "Ready"
+ // TemplateInstanceInstantiateFailure indicates the failure of the template
+ // instantiation.
+ TemplateInstanceInstantiateFailure TemplateInstanceConditionType = "InstantiateFailure"
+)
+
+// TemplateInstanceObject references an object created by a TemplateInstance.
+type TemplateInstanceObject struct {
+ // ref is a reference to the created object. When used under .spec, only
+ // name and namespace are used; these can contain references to parameters
+ // which will be substituted following the usual rules.
+ Ref corev1.ObjectReference `json:"ref,omitempty" protobuf:"bytes,1,opt,name=ref"`
+}
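Given the Ready and InstantiateFailure condition types defined above, a consumer typically scans status.conditions to decide whether an instantiation finished. A minimal client-side sketch using the types from this package; the isReady helper is an assumption for the example, not part of the API:

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
	corev1 "k8s.io/api/core/v1"
)

// isReady reports the two terminal outcomes described by the condition
// comments above: ready when Ready is True, failed when InstantiateFailure
// is True.
func isReady(ti *templatev1.TemplateInstance) (ready, failed bool) {
	for _, c := range ti.Status.Conditions {
		switch {
		case c.Type == templatev1.TemplateInstanceReady && c.Status == corev1.ConditionTrue:
			ready = true
		case c.Type == templatev1.TemplateInstanceInstantiateFailure && c.Status == corev1.ConditionTrue:
			failed = true
		}
	}
	return ready, failed
}

func main() {
	ti := &templatev1.TemplateInstance{}
	ti.Status.Conditions = []templatev1.TemplateInstanceCondition{
		{Type: templatev1.TemplateInstanceReady, Status: corev1.ConditionTrue},
	}
	r, f := isReady(ti)
	fmt.Println(r, f) // true false
}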
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TemplateInstanceList is a list of TemplateInstance objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type TemplateInstanceList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of TemplateInstances
+ Items []TemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BrokerTemplateInstance holds the service broker-related state associated with
+// a TemplateInstance. BrokerTemplateInstance is part of an experimental API.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BrokerTemplateInstance struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec describes the state of this BrokerTemplateInstance.
+ Spec BrokerTemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.
+type BrokerTemplateInstanceSpec struct {
+ // templateInstance is a reference to a TemplateInstance object residing
+ // in a namespace.
+ TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"`
+
+ // secret is a reference to a Secret object residing in a namespace,
+ // containing the necessary template parameters.
+ Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"`
+
+ // bindingIDs is a list of 'binding_id's provided during successive bind
+ // calls to the template service broker.
+ BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BrokerTemplateInstanceList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of BrokerTemplateInstances
+ Items []BrokerTemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000..ff14f246bd28a
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go
@@ -0,0 +1,394 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerTemplateInstance) DeepCopyInto(out *BrokerTemplateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstance. +func (in *BrokerTemplateInstance) DeepCopy() *BrokerTemplateInstance { + if in == nil { + return nil + } + out := new(BrokerTemplateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerTemplateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerTemplateInstanceList) DeepCopyInto(out *BrokerTemplateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BrokerTemplateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceList. +func (in *BrokerTemplateInstanceList) DeepCopy() *BrokerTemplateInstanceList { + if in == nil { + return nil + } + out := new(BrokerTemplateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerTemplateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerTemplateInstanceSpec) DeepCopyInto(out *BrokerTemplateInstanceSpec) { + *out = *in + out.TemplateInstance = in.TemplateInstance + out.Secret = in.Secret + if in.BindingIDs != nil { + in, out := &in.BindingIDs, &out.BindingIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceSpec. +func (in *BrokerTemplateInstanceSpec) DeepCopy() *BrokerTemplateInstanceSpec { + if in == nil { + return nil + } + out := new(BrokerTemplateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Parameter) DeepCopyInto(out *Parameter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. 
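What these generated copies buy in practice: a plain struct assignment would alias reference fields such as BindingIDs, while DeepCopy duplicates them so the two objects can diverge safely. A small usage sketch:

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
)

func main() {
	orig := &templatev1.BrokerTemplateInstance{}
	orig.Spec.BindingIDs = []string{"binding-1"}

	// DeepCopy allocates a fresh BindingIDs slice (see the copy() call in
	// BrokerTemplateInstanceSpec.DeepCopyInto above), so mutating the copy
	// leaves the original untouched.
	cp := orig.DeepCopy()
	cp.Spec.BindingIDs[0] = "binding-2"

	fmt.Println(orig.Spec.BindingIDs[0]) // binding-1
}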
+func (in *Parameter) DeepCopy() *Parameter { + if in == nil { + return nil + } + out := new(Parameter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Template) DeepCopyInto(out *Template) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]Parameter, len(*in)) + copy(*out, *in) + } + if in.ObjectLabels != nil { + in, out := &in.ObjectLabels, &out.ObjectLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. +func (in *Template) DeepCopy() *Template { + if in == nil { + return nil + } + out := new(Template) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Template) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstance) DeepCopyInto(out *TemplateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstance. +func (in *TemplateInstance) DeepCopy() *TemplateInstance { + if in == nil { + return nil + } + out := new(TemplateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceCondition) DeepCopyInto(out *TemplateInstanceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceCondition. +func (in *TemplateInstanceCondition) DeepCopy() *TemplateInstanceCondition { + if in == nil { + return nil + } + out := new(TemplateInstanceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceList) DeepCopyInto(out *TemplateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TemplateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceList. 
+func (in *TemplateInstanceList) DeepCopy() *TemplateInstanceList { + if in == nil { + return nil + } + out := new(TemplateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceObject) DeepCopyInto(out *TemplateInstanceObject) { + *out = *in + out.Ref = in.Ref + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceObject. +func (in *TemplateInstanceObject) DeepCopy() *TemplateInstanceObject { + if in == nil { + return nil + } + out := new(TemplateInstanceObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceRequester) DeepCopyInto(out *TemplateInstanceRequester) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceRequester. +func (in *TemplateInstanceRequester) DeepCopy() *TemplateInstanceRequester { + if in == nil { + return nil + } + out := new(TemplateInstanceRequester) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceSpec) DeepCopyInto(out *TemplateInstanceSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(TemplateInstanceRequester) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceSpec. +func (in *TemplateInstanceSpec) DeepCopy() *TemplateInstanceSpec { + if in == nil { + return nil + } + out := new(TemplateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceStatus) DeepCopyInto(out *TemplateInstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TemplateInstanceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]TemplateInstanceObject, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceStatus. 
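The TemplateInstanceRequester copy above is the most involved case in this file: it allocates a fresh map and a fresh ExtraValue slice per key. A short sketch verifying that the copy is fully independent of the original:

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
)

func main() {
	in := &templatev1.TemplateInstanceRequester{
		Username: "alice",
		Extra:    map[string]templatev1.ExtraValue{"scopes": {"user:info"}},
	}

	// Both the map and each ExtraValue slice are reallocated by the
	// generated DeepCopyInto, so writes through the copy do not leak back.
	out := in.DeepCopy()
	out.Extra["scopes"][0] = "user:full"

	fmt.Println(in.Extra["scopes"][0]) // user:info
}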
+func (in *TemplateInstanceStatus) DeepCopy() *TemplateInstanceStatus { + if in == nil { + return nil + } + out := new(TemplateInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateList) DeepCopyInto(out *TemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Template, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList. +func (in *TemplateList) DeepCopy() *TemplateList { + if in == nil { + return nil + } + out := new(TemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..761390d02f770 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,159 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BrokerTemplateInstance = map[string]string{ + "": "BrokerTemplateInstance holds the service broker-related state associated with a TemplateInstance. BrokerTemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the state of this BrokerTemplateInstance.", +} + +func (BrokerTemplateInstance) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstance +} + +var map_BrokerTemplateInstanceList = map[string]string{ + "": "BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of BrokerTemplateInstances",
+}
+
+func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string {
+ return map_BrokerTemplateInstanceList
+}
+
+var map_BrokerTemplateInstanceSpec = map[string]string{
+ "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.",
+ "templateInstance": "templateInstance is a reference to a TemplateInstance object residing in a namespace.",
+ "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.",
+ "bindingIDs": "bindingIDs is a list of 'binding_id's provided during successive bind calls to the template service broker.",
+}
+
+func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string {
+ return map_BrokerTemplateInstanceSpec
+}
+
+var map_Parameter = map[string]string{
+ "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.",
+ "name": "name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.",
+ "displayName": "Optional: The name that will show in the UI instead of parameter 'Name'",
+ "description": "description of a parameter. Optional.",
+ "value": "value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.",
+ "generate": "generate specifies the generator to be used to generate a random string from an input value specified by the From field. The result string is stored in the Value field. If empty, no generator is used, leaving the result Value untouched. Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value",
+ "from": "from is an input value for the generator. Optional.",
+ "required": "Optional: Indicates the parameter must have a value. Defaults to false.",
+}
+
+func (Parameter) SwaggerDoc() map[string]string {
+ return map_Parameter
+}
+
+var map_Template = map[string]string{
+ "": "Template contains the inputs needed to produce a Config.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "message": "message is an optional instructional message that will be displayed when this template is instantiated. This field should inform the user how to utilize the newly created resources. Parameter substitution will be performed on the message before being displayed so that generated credentials and other parameters can be included in the output.",
+ "objects": "objects is an array of resources to include in this template. If a namespace value is hardcoded in the object, it will be removed during template instantiation, however if the namespace value is, or contains, a ${PARAMETER_REFERENCE}, the resolved value after parameter substitution will be respected and the object will be created in that namespace.",
+ "parameters": "parameters is an optional array of Parameters used during the Template to Config transformation.",
+ "labels": "labels is an optional set of labels that are applied to every object during the Template to Config transformation.",
+}
+
+func (Template) SwaggerDoc() map[string]string {
+ return map_Template
+}
+
+var map_TemplateInstance = map[string]string{
+ "": "TemplateInstance requests and records the instantiation of a Template. TemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec describes the desired state of this TemplateInstance.",
+ "status": "status describes the current state of this TemplateInstance.",
+}
+
+func (TemplateInstance) SwaggerDoc() map[string]string {
+ return map_TemplateInstance
+}
+
+var map_TemplateInstanceCondition = map[string]string{
+ "": "TemplateInstanceCondition contains condition information for a TemplateInstance.",
+ "type": "type of the condition, currently Ready or InstantiateFailure.",
+ "status": "status of the condition, one of True, False or Unknown.",
+ "lastTransitionTime": "lastTransitionTime is the last time a condition status transitioned from one state to another.",
+ "reason": "reason is a brief machine readable explanation for the condition's last transition.",
+ "message": "message is a human readable description of the details of the last transition, complementing reason.",
+}
+
+func (TemplateInstanceCondition) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceCondition
+}
+
+var map_TemplateInstanceList = map[string]string{
+ "": "TemplateInstanceList is a list of TemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of TemplateInstances",
+}
+
+func (TemplateInstanceList) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceList
+}
+
+var map_TemplateInstanceObject = map[string]string{
+ "": "TemplateInstanceObject references an object created by a TemplateInstance.",
+ "ref": "ref is a reference to the created object. 
When used under .spec, only name and namespace are used; these can contain references to parameters which will be substituted following the usual rules.", +} + +func (TemplateInstanceObject) SwaggerDoc() map[string]string { + return map_TemplateInstanceObject +} + +var map_TemplateInstanceRequester = map[string]string{ + "": "TemplateInstanceRequester holds the identity of an agent requesting a template instantiation.", + "username": "username uniquely identifies this user among all active users.", + "uid": "uid is a unique value that identifies this user across time; if this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "groups represent the groups this user is a part of.", + "extra": "extra holds additional information provided by the authenticator.", +} + +func (TemplateInstanceRequester) SwaggerDoc() map[string]string { + return map_TemplateInstanceRequester +} + +var map_TemplateInstanceSpec = map[string]string{ + "": "TemplateInstanceSpec describes the desired state of a TemplateInstance.", + "template": "template is a full copy of the template for instantiation.", + "secret": "secret is a reference to a Secret object containing the necessary template parameters.", + "requester": "requester holds the identity of the agent requesting the template instantiation.", +} + +func (TemplateInstanceSpec) SwaggerDoc() map[string]string { + return map_TemplateInstanceSpec +} + +var map_TemplateInstanceStatus = map[string]string{ + "": "TemplateInstanceStatus describes the current state of a TemplateInstance.", + "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.", + "objects": "objects references the objects created by the TemplateInstance.", +} + +func (TemplateInstanceStatus) SwaggerDoc() map[string]string { + return map_TemplateInstanceStatus +} + +var map_TemplateList = map[string]string{ + "": "TemplateList is a list of Template objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of templates", +} + +func (TemplateList) SwaggerDoc() map[string]string { + return map_TemplateList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/user/v1/doc.go b/vendor/github.com/openshift/api/user/v1/doc.go new file mode 100644 index 0000000000000..42287095e242a --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/user/apis/user +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=user.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/user/v1/generated.pb.go b/vendor/github.com/openshift/api/user/v1/generated.pb.go new file mode 100644 index 0000000000000..0689ed38990ea --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/generated.pb.go @@ -0,0 +1,2274 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
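All of the Marshal, Size, and Unmarshal functions in this generated file frame fields with base-128 varints (see encodeVarintGenerated and sovGenerated further down). The following standalone sketch shows that encoding written forward for clarity; putUvarint is an illustrative name, and the generated code emits the same bytes right to left into a pre-sized buffer instead.

package main

import "fmt"

// putUvarint writes v as a little-endian base-128 varint: 7 payload bits per
// byte, with the high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) int {
	n := 0
	for v >= 1<<7 {
		buf[n] = byte(v&0x7f | 0x80)
		v >>= 7
		n++
	}
	buf[n] = byte(v)
	return n + 1
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
}

This is why sovGenerated computes (bits(x|1)+6)/7: it counts how many 7-bit groups a value needs, which is exactly the encoded byte length.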
+// source: github.com/openshift/api/user/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Group) Reset() { *m = Group{} } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return m.Size() +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *GroupList) Reset() { *m = GroupList{} } +func (*GroupList) ProtoMessage() {} +func (*GroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{1} +} +func (m *GroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupList.Merge(m, src) +} +func (m *GroupList) XXX_Size() int { + return m.Size() +} +func (m *GroupList) XXX_DiscardUnknown() { + xxx_messageInfo_GroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupList proto.InternalMessageInfo + +func (m *Identity) Reset() { *m = Identity{} } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{2} +} +func (m *Identity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(m, src) +} +func (m *Identity) XXX_Size() int { + return m.Size() +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo + +func (m *IdentityList) Reset() { *m = IdentityList{} } +func (*IdentityList) ProtoMessage() {} +func (*IdentityList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{3} +} +func (m *IdentityList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*IdentityList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentityList.Merge(m, src) +} +func (m *IdentityList) XXX_Size() int { + return m.Size() +} +func (m *IdentityList) XXX_DiscardUnknown() { + xxx_messageInfo_IdentityList.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentityList proto.InternalMessageInfo + +func (m *OptionalNames) Reset() { *m = OptionalNames{} } +func (*OptionalNames) ProtoMessage() {} +func (*OptionalNames) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{4} +} +func (m *OptionalNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNames.Merge(m, src) +} +func (m *OptionalNames) XXX_Size() int { + return m.Size() +} +func (m *OptionalNames) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNames.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNames proto.InternalMessageInfo + +func (m *User) Reset() { *m = User{} } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{5} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(m, src) +} +func (m *User) XXX_Size() int { + return m.Size() +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *UserIdentityMapping) Reset() { *m = UserIdentityMapping{} } +func (*UserIdentityMapping) ProtoMessage() {} +func (*UserIdentityMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{6} +} +func (m *UserIdentityMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserIdentityMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserIdentityMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserIdentityMapping.Merge(m, src) +} +func (m *UserIdentityMapping) XXX_Size() int { + return m.Size() +} +func (m *UserIdentityMapping) XXX_DiscardUnknown() { + xxx_messageInfo_UserIdentityMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_UserIdentityMapping proto.InternalMessageInfo + +func (m *UserList) Reset() { *m = UserList{} } +func (*UserList) ProtoMessage() {} +func (*UserList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{7} +} +func (m *UserList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserList) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserList.Merge(m, src) +} +func (m *UserList) XXX_Size() int { + return m.Size() +} +func (m *UserList) XXX_DiscardUnknown() { + xxx_messageInfo_UserList.DiscardUnknown(m) +} + +var xxx_messageInfo_UserList proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*Group)(nil), "github.com.openshift.api.user.v1.Group") + proto.RegisterType((*GroupList)(nil), "github.com.openshift.api.user.v1.GroupList") + proto.RegisterType((*Identity)(nil), "github.com.openshift.api.user.v1.Identity") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.user.v1.Identity.ExtraEntry") + proto.RegisterType((*IdentityList)(nil), "github.com.openshift.api.user.v1.IdentityList") + proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.user.v1.OptionalNames") + proto.RegisterType((*User)(nil), "github.com.openshift.api.user.v1.User") + proto.RegisterType((*UserIdentityMapping)(nil), "github.com.openshift.api.user.v1.UserIdentityMapping") + proto.RegisterType((*UserList)(nil), "github.com.openshift.api.user.v1.UserList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/user/v1/generated.proto", fileDescriptor_ea159b02d89a1362) +} + +var fileDescriptor_ea159b02d89a1362 = []byte{ + // 726 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x13, 0x4b, + 0x14, 0xf5, 0xc4, 0xde, 0xc8, 0x9e, 0x38, 0x4f, 0xd6, 0xbe, 0x14, 0x2b, 0x17, 0x6b, 0x6b, 0x9f, + 0xf4, 0x88, 0x10, 0xcc, 0x26, 0x11, 0x20, 0x2b, 0xa5, 0x45, 0x82, 0x22, 0x12, 0x12, 0x46, 0xa2, + 0x89, 0x28, 0x98, 0xd8, 0xe3, 0xf5, 0x60, 0xef, 0x87, 0x76, 0x67, 0x2d, 0xdc, 0xe5, 0x27, 0x40, + 0x47, 0xc9, 0x9f, 0x40, 0x14, 0x88, 0x3e, 0x74, 0x29, 0x53, 0x20, 0x8b, 0x2c, 0x1d, 0xbf, 0x02, + 0xcd, 0xec, 0x87, 0xd7, 0xf9, 0x90, 0x23, 0x21, 0xb9, 0xdb, 0xb9, 0x73, 0xcf, 0x99, 0x73, 0xcf, + 0xbd, 0xd7, 0x32, 0xdc, 0xb0, 0x18, 0xef, 0x87, 0x27, 0xa8, 0xe3, 0xda, 0xa6, 0xeb, 0x51, 0x27, + 0xe8, 0xb3, 0x1e, 0x37, 0x89, 0xc7, 0xcc, 0x30, 0xa0, 0xbe, 0x39, 0xda, 0x34, 0x2d, 0xea, 0x50, + 0x9f, 0x70, 0xda, 0x45, 0x9e, 0xef, 0x72, 0x57, 0x6d, 0x4e, 0x11, 0x28, 0x43, 0x20, 0xe2, 0x31, + 0x24, 0x10, 0x68, 0xb4, 0x59, 0x7f, 0x98, 0xe3, 0xb4, 0x5c, 0xcb, 0x35, 0x25, 0xf0, 0x24, 0xec, + 0xc9, 0x93, 0x3c, 0xc8, 0xaf, 0x98, 0xb0, 0x6e, 0x0c, 0x5a, 0x01, 0x62, 0xae, 0x7c, 0xb4, 0xe3, + 0xfa, 0xf4, 0x86, 0x47, 0xeb, 0x8f, 0xa6, 0x39, 0x36, 0xe9, 0xf4, 0x99, 0x43, 0xfd, 0xb1, 0xe9, + 0x0d, 0x2c, 0x11, 0x08, 0x4c, 0x9b, 0x72, 0x72, 0x13, 0xea, 0xc9, 0x6d, 0x28, 0x3f, 0x74, 0x38, + 0xb3, 0xa9, 0x19, 0x74, 0xfa, 0xd4, 0x26, 0x57, 0x71, 0xc6, 0x57, 0x00, 0x95, 0x67, 0xbe, 0x1b, + 0x7a, 0xea, 0x1b, 0x58, 0x16, 0xe4, 0x5d, 0xc2, 0x89, 0x06, 0x9a, 0x60, 0x7d, 0x65, 0x6b, 0x03, + 0xc5, 0xa4, 0x28, 0x4f, 0x8a, 0xbc, 0x81, 0x25, 0x02, 0x01, 0x12, 0xd9, 0x68, 0xb4, 0x89, 0x0e, + 0x4f, 0xde, 0xd2, 0x0e, 0x3f, 0xa0, 0x9c, 0xb4, 0xd5, 0xb3, 0x49, 0xa3, 0x10, 0x4d, 0x1a, 0x70, + 0x1a, 0xc3, 0x19, 0xab, 0x7a, 0x04, 0x15, 0xe1, 0x5b, 0xa0, 0x2d, 0x49, 0x7a, 0x13, 0xcd, 0xb3, + 0x17, 0x1d, 0x7a, 0x9c, 0xb9, 0x0e, 0x19, 0xbe, 0x20, 0x36, 0x0d, 0xda, 0x95, 0x68, 0xd2, 0x50, + 0x5e, 0x09, 0x06, 0x1c, 0x13, 0x19, 0x5f, 0x00, 0xac, 0x48, 0xf5, 0xfb, 0x2c, 0xe0, 0xea, 0xeb, + 0x6b, 0x15, 0xa0, 0xbb, 0x55, 0x20, 0xd0, 0x52, 0x7f, 0x2d, 0xd1, 0x5f, 0x4e, 0x23, 0x39, 0xf5, + 0xfb, 0x50, 0x61, 0x9c, 0xda, 0x42, 0x7d, 0x71, 0x7d, 0x65, 0xeb, 0xde, 0x7c, 0xf5, 0x52, 0x59, + 0x7b, 0x35, 0xe1, 0x54, 0xf6, 0x04, 0x1a, 0xc7, 0x24, 0xc6, 0xf7, 0x22, 0x2c, 0xef, 0x75, 0xa9, + 0xc3, 0x19, 0x1f, 0x2f, 0xc0, 0xfa, 0x16, 0xac, 0x7a, 0xbe, 0x3b, 0x62, 0x5d, 0xea, 0x0b, 0x2f, + 0x65, 0x07, 0x2a, 0xed, 0xb5, 0x04, 0x53, 0x3d, 0xca, 0xdd, 0xe1, 0x99, 0x4c, 0xf5, 0x29, 0xac, + 0xa5, 0x67, 0x61, 0xbd, 0x44, 0x17, 0x25, 0x5a, 0x4b, 0xd0, 0xb5, 0xa3, 
0x2b, 0xf7, 0xf8, 0x1a, + 0x42, 0xdd, 0x81, 0x25, 0xe1, 0x8a, 0x56, 0x92, 0xd5, 0xfd, 0x97, 0xab, 0x0e, 0x89, 0x3d, 0x98, + 0xd6, 0x82, 0x69, 0x8f, 0xfa, 0xd4, 0xe9, 0xd0, 0x76, 0x35, 0xa1, 0x2f, 0x09, 0x12, 0x2c, 0xe1, + 0xea, 0x31, 0x54, 0xe8, 0x3b, 0xee, 0x13, 0x4d, 0x91, 0x3d, 0x78, 0x3c, 0xbf, 0x07, 0xa9, 0xc7, + 0x68, 0x47, 0xe0, 0x76, 0x1c, 0xee, 0x8f, 0xa7, 0x1d, 0x91, 0x31, 0x1c, 0x53, 0xd6, 0x5b, 0x10, + 0x4e, 0x73, 0xd4, 0x1a, 0x2c, 0x0e, 0xe8, 0x58, 0x76, 0xa3, 0x82, 0xc5, 0xa7, 0xba, 0x06, 0x95, + 0x11, 0x19, 0x86, 0x89, 0x77, 0x38, 0x3e, 0x6c, 0x2f, 0xb5, 0x80, 0xf1, 0x0d, 0xc0, 0x6a, 0xfa, + 0xce, 0x02, 0x06, 0xf1, 0x70, 0x76, 0x10, 0xef, 0xdf, 0xdd, 0x84, 0x5b, 0x66, 0x71, 0x1b, 0xae, + 0xce, 0x2c, 0x9a, 0xda, 0x48, 0x5f, 0x00, 0xcd, 0xe2, 0x7a, 0x25, 0xde, 0xbb, 0x3c, 0x62, 0xbb, + 0xfc, 0xf1, 0x53, 0xa3, 0x70, 0xfa, 0xa3, 0x59, 0x30, 0x7e, 0x03, 0x28, 0x1b, 0xb4, 0x80, 0x19, + 0x7e, 0x00, 0xcb, 0xbd, 0x70, 0x38, 0xcc, 0xcd, 0x6f, 0xe6, 0xd2, 0x6e, 0x12, 0xc7, 0x59, 0x86, + 0x8a, 0x20, 0x64, 0x71, 0xd9, 0x8c, 0x06, 0x5a, 0x51, 0x16, 0xf2, 0x8f, 0xe0, 0xde, 0xcb, 0xa2, + 0x38, 0x97, 0xa1, 0x1a, 0x70, 0xd9, 0x12, 0xfb, 0x1a, 0x68, 0x25, 0x99, 0x0b, 0xa3, 0x49, 0x63, + 0x59, 0x6e, 0x70, 0x80, 0x93, 0x1b, 0xe3, 0xc3, 0x12, 0xfc, 0x57, 0x14, 0x9b, 0xfa, 0x79, 0x40, + 0x3c, 0x8f, 0x39, 0xd6, 0x02, 0x6a, 0x7f, 0x09, 0xcb, 0x89, 0xd6, 0x71, 0xf2, 0xeb, 0x79, 0xa7, + 0x1d, 0xca, 0x0c, 0x4a, 0x15, 0xe3, 0x8c, 0x26, 0x5b, 0xc9, 0xe2, 0x5f, 0xad, 0xa4, 0xf1, 0x19, + 0xc0, 0xb2, 0x38, 0x2e, 0x60, 0xf0, 0x9f, 0xcf, 0x0e, 0xfe, 0xff, 0xf3, 0x07, 0x5f, 0x08, 0xbb, + 0x79, 0xe8, 0xdb, 0xbb, 0x67, 0x97, 0x7a, 0xe1, 0xfc, 0x52, 0x2f, 0x5c, 0x5c, 0xea, 0x85, 0xd3, + 0x48, 0x07, 0x67, 0x91, 0x0e, 0xce, 0x23, 0x1d, 0x5c, 0x44, 0x3a, 0xf8, 0x19, 0xe9, 0xe0, 0xfd, + 0x2f, 0xbd, 0x70, 0xdc, 0x9c, 0xf7, 0x9f, 0xe1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x85, + 0x81, 0x86, 0x56, 0x08, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Users != nil { + { + size, err := m.Users.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + 
} + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Identity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Identity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Identity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i -= len(m.ProviderUserName) + copy(dAtA[i:], m.ProviderUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderUserName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ProviderName) + copy(dAtA[i:], m.ProviderName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IdentityList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentityList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentityList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m OptionalNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 
0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Identities) > 0 { + for iNdEx := len(m.Identities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Identities[iNdEx]) + copy(dAtA[i:], m.Identities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Identities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.FullName) + copy(dAtA[i:], m.FullName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FullName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserIdentityMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserIdentityMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserIdentityMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Identity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa 
+ return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Users != nil { + l = m.Users.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GroupList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Identity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderUserName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IdentityList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *User) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FullName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Identities) > 0 { + for _, s := range m.Identities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *UserIdentityMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Identity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Group) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Group{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Users:` + strings.Replace(fmt.Sprintf("%v", this.Users), "OptionalNames", "OptionalNames", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*GroupList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Group{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Group", "Group", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&GroupList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Identity) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]string{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&Identity{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ProviderName:` + fmt.Sprintf("%v", this.ProviderName) + `,`, + `ProviderUserName:` + fmt.Sprintf("%v", this.ProviderUserName) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *IdentityList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Identity{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Identity", "Identity", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IdentityList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *User) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&User{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `FullName:` + fmt.Sprintf("%v", this.FullName) + `,`, + `Identities:` + fmt.Sprintf("%v", this.Identities) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *UserIdentityMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserIdentityMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Identity:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Identity), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]User{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "User", "User", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&UserList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Group) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Users == nil { + m.Users = OptionalNames{} + } + if err := m.Users.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Group{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Identity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Identity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Identity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.ProviderName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderUserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderUserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if 
intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IdentityList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentityList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentityList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Identity{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
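// Decoding follows the same hand-rolled gogo/protobuf loop as the other generated Unmarshal methods: each key varint carries the field number in its high bits and the wire type in the low three bits, and wire type 2 (length-delimited) fields are a varint length followed by that many payload bytes. +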
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FullName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FullName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identities = append(m.Identities, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserIdentityMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserIdentityMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserIdentityMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Identity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, User{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
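// Unknown field numbers are measured with skipGenerated and stepped over, so payloads written by newer revisions of the API still decode here. +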
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/user/v1/generated.proto b/vendor/github.com/openshift/api/user/v1/generated.proto new file mode 100644 index 0000000000000..f07b446ad452a --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/generated.proto @@ -0,0 +1,144 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.user.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/user/v1"; + +// Group represents a referenceable set of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Group { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // users is the list of users in this group. + optional OptionalNames users = 2; +} + +// GroupList is a collection of Groups +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message GroupList { + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of groups + repeated Group items = 2; +} + +// Identity records a successful authentication of a user with an identity provider. The +// information about the source of authentication is stored on the identity, and the identity +// is then associated with a single user object. Multiple identities can reference a single +// user. Information retrieved from the authentication provider is stored in the extra field +// using a schema determined by the provider. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Identity { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // providerName is the source of identity information + optional string providerName = 2; + + // providerUserName uniquely represents this identity in the scope of the provider + optional string providerUserName = 3; + + // user is a reference to the user this identity is associated with + // Both Name and UID must be set + optional .k8s.io.api.core.v1.ObjectReference user = 4; + + // extra holds extra information about this identity + map<string, string> extra = 5; +} + +// IdentityList is a collection of Identities +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message IdentityList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of identities + repeated Identity items = 2; +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNames { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Upon log in, every user of the system receives a User and Identity resource. Administrators +// may directly manipulate the attributes of the users for their own tracking, or set groups +// via the API. The user name is unique and is chosen based on the value provided by the +// identity provider - if a user already exists with the incoming name, the user name may have +// a number appended to it depending on the configuration of the system. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message User { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // fullName is the full name of user + optional string fullName = 2; + + // identities are the identities associated with this user + // +optional + repeated string identities = 3; + + // groups specifies group names this user is a member of.
+ // This field is deprecated and will be removed in a future release. + // Instead, create a Group object containing the name of this User. + repeated string groups = 4; +} + +// UserIdentityMapping maps a user to an identity +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message UserIdentityMapping { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // identity is a reference to an identity + optional .k8s.io.api.core.v1.ObjectReference identity = 2; + + // user is a reference to a user + optional .k8s.io.api.core.v1.ObjectReference user = 3; +} + +// UserList is a collection of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message UserList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of users + repeated User items = 2; +} + diff --git a/vendor/github.com/openshift/api/user/v1/legacy.go b/vendor/github.com/openshift/api/user/v1/legacy.go new file mode 100644 index 0000000000000..6817a9f1f3ea9 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/legacy.go @@ -0,0 +1,27 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &User{}, + &UserList{}, + &Identity{}, + &IdentityList{}, + &UserIdentityMapping{}, + &Group{}, + &GroupList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
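+ // Registering under the empty legacy group ("", v1) keeps these kinds decodable for clients of the old ungrouped API surface; new callers should prefer Install from register.go, which registers them as user.openshift.io/v1.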
+ return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/register.go b/vendor/github.com/openshift/api/user/v1/register.go new file mode 100644 index 0000000000000..11341d72a98ac --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "user.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &User{}, + &UserList{}, + &Identity{}, + &IdentityList{}, + &UserIdentityMapping{}, + &Group{}, + &GroupList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/types.go b/vendor/github.com/openshift/api/user/v1/types.go new file mode 100644 index 0000000000000..64ae8c8300b24 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/types.go @@ -0,0 +1,174 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Upon log in, every user of the system receives a User and Identity resource. Administrators +// may directly manipulate the attributes of the users for their own tracking, or set groups +// via the API. The user name is unique and is chosen based on the value provided by the +// identity provider - if a user already exists with the incoming name, the user name may have +// a number appended to it depending on the configuration of the system. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type User struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // fullName is the full name of user + FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` + + // identities are the identities associated with this user + // +optional + Identities []string `json:"identities,omitempty" protobuf:"bytes,3,rep,name=identities"` + + // groups specifies group names this user is a member of. + // This field is deprecated and will be removed in a future release. + // Instead, create a Group object containing the name of this User. 
+ Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserList is a collection of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type UserList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of users + Items []User `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Identity records a successful authentication of a user with an identity provider. The +// information about the source of authentication is stored on the identity, and the identity +// is then associated with a single user object. Multiple identities can reference a single +// user. Information retrieved from the authentication provider is stored in the extra field +// using a schema determined by the provider. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Identity struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // providerName is the source of identity information + ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"` + + // providerUserName uniquely represents this identity in the scope of the provider + ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"` + + // user is a reference to the user this identity is associated with + // Both Name and UID must be set + User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"` + + // extra holds extra information about this identity + Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IdentityList is a collection of Identities +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type IdentityList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of identities + Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=get,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserIdentityMapping maps a user to an identity +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type UserIdentityMapping struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // identity is a reference to an identity + Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"` + // user is a reference to a user + User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNames []string + +func (t OptionalNames) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Group represents a referenceable set of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Group struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // users is the list of users in this group. + Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GroupList is a collection of Groups +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type GroupList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of groups + Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e6b2fb867c331 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. 
+func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Identity) DeepCopyInto(out *Identity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.User = in.User + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Identity. +func (in *Identity) DeepCopy() *Identity { + if in == nil { + return nil + } + out := new(Identity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Identity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityList) DeepCopyInto(out *IdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Identity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityList. +func (in *IdentityList) DeepCopy() *IdentityList { + if in == nil { + return nil + } + out := new(IdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNames) DeepCopyInto(out *OptionalNames) { + { + in := &in + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames. 
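+// Note the value receiver: OptionalNames is a named slice type, so the copy returned here is a fresh slice rather than a pointer to shared storage.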
+func (in OptionalNames) DeepCopy() OptionalNames { + if in == nil { + return nil + } + out := new(OptionalNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Identities != nil { + in, out := &in.Identities, &out.Identities + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserIdentityMapping) DeepCopyInto(out *UserIdentityMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Identity = in.Identity + out.User = in.User + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityMapping. +func (in *UserIdentityMapping) DeepCopy() *UserIdentityMapping { + if in == nil { + return nil + } + out := new(UserIdentityMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserIdentityMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000..d85e7dfc58a16 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,90 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. 
TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Group = map[string]string{ + "": "Group represents a referenceable set of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "users": "users is the list of users in this group.", +} + +func (Group) SwaggerDoc() map[string]string { + return map_Group +} + +var map_GroupList = map[string]string{ + "": "GroupList is a collection of Groups\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of groups", +} + +func (GroupList) SwaggerDoc() map[string]string { + return map_GroupList +} + +var map_Identity = map[string]string{ + "": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "providerName": "providerName is the source of identity information", + "providerUserName": "providerUserName uniquely represents this identity in the scope of the provider", + "user": "user is a reference to the user this identity is associated with Both Name and UID must be set", + "extra": "extra holds extra information about this identity", +} + +func (Identity) SwaggerDoc() map[string]string { + return map_Identity +} + +var map_IdentityList = map[string]string{ + "": "IdentityList is a collection of Identities\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of identities", +} + +func (IdentityList) SwaggerDoc() map[string]string { + return map_IdentityList +} + +var map_User = map[string]string{ + "": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. 
The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "fullName": "fullName is the full name of user", + "identities": "identities are the identities associated with this user", + "groups": "groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", +} + +func (User) SwaggerDoc() map[string]string { + return map_User +} + +var map_UserIdentityMapping = map[string]string{ + "": "UserIdentityMapping maps a user to an identity\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "identity": "identity is a reference to an identity", + "user": "user is a reference to a user", +} + +func (UserIdentityMapping) SwaggerDoc() map[string]string { + return map_UserIdentityMapping +} + +var map_UserList = map[string]string{ + "": "UserList is a collection of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of users", +} + +func (UserList) SwaggerDoc() map[string]string { + return map_UserList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/apiserver-library-go/LICENSE b/vendor/github.com/openshift/apiserver-library-go/LICENSE new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go new file mode 100644 index 0000000000000..c30bc9a5d0b77 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go @@ -0,0 +1,133 @@ +package imagepolicy + +import ( + "fmt" + + "k8s.io/klog/v2" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + kapi "k8s.io/kubernetes/pkg/apis/core" + + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules" + "github.com/openshift/library-go/pkg/image/reference" +) + +var errRejectByPolicy = fmt.Errorf("this image is prohibited by policy") + +type policyDecisions map[kapi.ObjectReference]policyDecision + +type policyDecision struct { + attrs *rules.ImagePolicyAttributes + tested bool + resolutionErr error +} + +func accept(accepter rules.Accepter, policy imageResolutionPolicy, resolver imageResolver, m imagereferencemutators.ImageReferenceMutator, annotations imagereferencemutators.AnnotationAccessor, attr admission.Attributes, excludedRules sets.String, mutationAllowed bool) error { + decisions := policyDecisions{} + + t := attr.GetResource().GroupResource() + gr := metav1.GroupResource{Resource: t.Resource, Group: t.Group} + + resolveAllNames := imagereferencemutators.ResolveAllNames(annotations) + + errs := m.Mutate(func(ref *kapi.ObjectReference) error { + // create the attribute set for this particular reference, if we have never seen the reference + // before + decision, ok := decisions[*ref] + if !ok { + if policy.RequestsResolution(gr) { + resolvedAttrs, err := resolver.ResolveObjectReference(ref, attr.GetNamespace(), resolveAllNames) + switch { + case err != nil && policy.FailOnResolutionFailure(gr): + klog.V(5).Infof("resource failed on error during required image resolution: %v", err) + // if we had a resolution error and we're supposed to fail, fail + decision.resolutionErr = err + decision.tested = true + decisions[*ref] = decision + return err + + case err != nil: + klog.V(5).Infof("error during optional image resolution: %v", err) + // if we had an error, but aren't supposed to fail, just don't do anything else and keep track of + // the resolution failure + decision.resolutionErr = err + + case err == nil: + oldDecisionAttributes := decision.attrs + // if we resolved properly, assign the attributes and rewrite the pull spec if we need to + decision.attrs = resolvedAttrs + + if policy.RewriteImagePullSpec(resolvedAttrs, attr.GetOperation() == admission.Update, gr) { + refUpdate := kapi.ObjectReference{Kind: "DockerImage", Name: resolvedAttrs.Name.Exact()} + + // check if we are mutating the object in the validate phase and discard the update + // this allows creation of objects like imagestreams in between admit and validate + if !mutationAllowed && (ref.Namespace != refUpdate.Namespace || ref.Name != refUpdate.Name || ref.Kind != refUpdate.Kind) { + klog.V(5).Infof("image resolution changed between admit and verify: falling back to the old image attributes (attributes=%#v)", oldDecisionAttributes) + } else { + ref.Namespace = refUpdate.Namespace + ref.Name = refUpdate.Name + ref.Kind = refUpdate.Kind + } + } + } + } + // if we don't have
any image policy attributes, attempt a best effort parse for the remaining tests + if decision.attrs == nil { + decision.attrs = &rules.ImagePolicyAttributes{} + + // an objectref that is DockerImage ref will have a name that corresponds to its pull spec. We can parse that + // to a docker image ref + if ref != nil && ref.Kind == "DockerImage" { + decision.attrs.Name, _ = reference.Parse(ref.Name) + } + } + decision.attrs.Resource = gr + decision.attrs.ExcludedRules = excludedRules + klog.V(5).Infof("post resolution, ref=%s:%s/%s, image attributes=%#v, resolution err=%v", ref.Kind, ref.Name, ref.Namespace, *decision.attrs, decision.resolutionErr) + } + + // we only need to test a given input once for acceptance + if !decision.tested { + accepted := accepter.Accepts(decision.attrs) + klog.V(5).Infof("Made decision for %v (as: %v, resolution err: %v): accept=%t", ref, decision.attrs.Name, decision.resolutionErr, accepted) + + decision.tested = true + decisions[*ref] = decision + + if !accepted { + // if the image is rejected due to a resolution error, return the resolution error + // This is a dubious result. It's entirely possible we had an error resolving the image, + // but no rule actually requires image resolution and the image was rejected for some other + // reason. The user will then see that it was rejected due to the resolution error, but + // that isn't really why it was rejected. Better logic would check if the rule that + // rejected the image required resolution, and only then report the resolution failure. + if decision.resolutionErr != nil { + return decision.resolutionErr + } + // otherwise the image is being rejected by policy + return errRejectByPolicy + } + } + + return nil + }) + + for i := range errs { + errs[i].Type = field.ErrorTypeForbidden + if errs[i].Detail != errRejectByPolicy.Error() { + errs[i].Detail = fmt.Sprintf("this image is prohibited by policy: %s", errs[i].Detail) + } + } + + if len(errs) > 0 { + klog.V(5).Infof("image policy admission rejecting due to: %v", errs) + return apierrs.NewInvalid(attr.GetKind().GroupKind(), attr.GetName(), errs) + } + return nil +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/default-policy.yaml b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/default-policy.yaml new file mode 100644 index 0000000000000..8a24de7b4b829 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/default-policy.yaml @@ -0,0 +1,20 @@ +kind: ImagePolicyConfig +apiVersion: image.openshift.io/v1 +# To require that all images running on the platform be imported first, you may uncomment the +# following rule. Any image that refers to a registry outside of OpenShift will be rejected unless it +# points directly to an image digest (myregistry.com/myrepo/image@sha256:ea83bcf...) and that +# digest has been imported via the import-image flow. +#resolveImages: Required +executionRules: +- name: execution-denied + # Reject all images that have the annotation images.openshift.io/deny-execution set to true.
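+  # Because skipOnResolutionFailure is true below, images whose metadata cannot be resolved
+  # bypass these match conditions instead of being rejected outright.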
+ # This annotation may be set by infrastructure that wishes to flag particular images as dangerous + onResources: + - resource: pods + - resource: builds + group: build.openshift.io + reject: true + matchImageAnnotations: + - key: images.openshift.io/deny-execution + value: "true" + skipOnResolutionFailure: true diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go new file mode 100644 index 0000000000000..b0fbe3a573b45 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go @@ -0,0 +1,78 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +func SetDefaults_ImagePolicyConfig(obj *ImagePolicyConfig) { + if obj == nil { + return + } + + if len(obj.ResolveImages) == 0 { + obj.ResolveImages = Attempt + } + + for i := range obj.ExecutionRules { + if len(obj.ExecutionRules[i].OnResources) == 0 { + obj.ExecutionRules[i].OnResources = []metav1.GroupResource{{Resource: "pods", Group: kapi.GroupName}} + } + } + + if obj.ResolutionRules == nil { + obj.ResolutionRules = []ImageResolutionPolicyRule{ + {TargetResource: metav1.GroupResource{Group: "", Resource: "pods"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "", Resource: "replicationcontrollers"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "apps.openshift.io", Resource: "deploymentconfigs"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "apps", Resource: "daemonsets"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "apps", Resource: "deployments"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "apps", Resource: "statefulsets"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "apps", Resource: "replicasets"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "build.openshift.io", Resource: "builds"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "batch", Resource: "jobs"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "batch", Resource: "cronjobs"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "extensions", Resource: "daemonsets"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "extensions", Resource: "deployments"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "extensions", Resource: "replicasets"}, LocalNames: true}, + } + // default the resolution policy to the global default + for i := range obj.ResolutionRules { + if len(obj.ResolutionRules[i].Policy) != 0 { + continue + } + obj.ResolutionRules[i].Policy = DoNotAttempt + for _, rule := range obj.ExecutionRules { + if executionRuleCoversResource(rule, obj.ResolutionRules[i].TargetResource) { + obj.ResolutionRules[i].Policy = obj.ResolveImages + break + } + } + } + } else { + // default the resolution policy to the global default + for i := range obj.ResolutionRules { + if len(obj.ResolutionRules[i].Policy) != 0 { + continue + } + obj.ResolutionRules[i].Policy = obj.ResolveImages + } + } + +} + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ImagePolicyConfig{}, func(obj interface{}) { SetDefaults_ImagePolicyConfig(obj.(*ImagePolicyConfig)) }) + return nil +} + +// 
executionRuleCoversResource returns true if gr is covered by rule. +func executionRuleCoversResource(rule ImageExecutionPolicyRule, gr metav1.GroupResource) bool { + for _, target := range rule.OnResources { + if target.Group == gr.Group && (target.Resource == gr.Resource || target.Resource == "*") { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/doc.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/doc.go new file mode 100644 index 0000000000000..124d5620f4a74 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/register.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/register.go new file mode 100644 index 0000000000000..602e3a2c541a6 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/register.go @@ -0,0 +1,26 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (obj *ImagePolicyConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "image.openshift.io", Version: "v1"} + +var ( + schemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + addDefaultingFuncs, + ) + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ImagePolicyConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/types.go new file mode 100644 index 0000000000000..b5d5198a0e95e --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/types.go @@ -0,0 +1,132 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +const ( + PluginName = "image.openshift.io/ImagePolicy" + + // IgnorePolicyRulesAnnotation is a comma delimited list of rule names to omit from consideration + // in a given namespace. Loaded from the namespace. + IgnorePolicyRulesAnnotation = "alpha.image.policy.openshift.io/ignore-rules" + // ResolveNamesAnnotation when placed on an object template or object requests that all relevant + // image names be resolved by taking the name and tag and attempting to resolve a local image stream. + // This overrides the imageLookupPolicy on the image stream. If the object is not namespaced the + // annotation is ignored. The only valid value is '*'. + ResolveNamesAnnotation = "alpha.image.policy.openshift.io/resolve-names" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicyConfig is the configuration for control of images running on the platform. +type ImagePolicyConfig struct { + metav1.TypeMeta `json:",inline"` + + // ResolveImages indicates the default image resolution behavior. If a rewriting policy is chosen, + // then the image pull specs will be updated. 
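+	// If unset, this field defaults to Attempt (applied by SetDefaults_ImagePolicyConfig in defaults.go).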
+ ResolveImages ImageResolutionType `json:"resolveImages"` + + // ResolutionRules allows more specific image resolution rules to be applied per resource. If + // empty, it defaults to allowing local image stream lookups - "mysql" will map to the image stream + // tag "mysql:latest" in the current namespace if the stream supports it. The default for this + // field is all known types that support image resolution, and the policy for those rules will be + // set to the overall resolution policy if an execution rule references the same resource. + ResolutionRules []ImageResolutionPolicyRule `json:"resolutionRules"` + + // ExecutionRules determine whether the use of an image is allowed in an object with a pod spec. + // By default, these rules only apply to pods, but may be extended to other resource types. + // If all execution rules are negations, the default behavior is allow all. If any execution rule + // is an allow, the default behavior is to reject all. + ExecutionRules []ImageExecutionPolicyRule `json:"executionRules"` +} + +// ImageResolutionType is an enumerated string that indicates how image pull spec resolution should be handled +type ImageResolutionType string + +var ( + // require resolution to succeed and rewrite the resource to use it + RequiredRewrite ImageResolutionType = "RequiredRewrite" + // require resolution to succeed, but don't rewrite the image pull spec + Required ImageResolutionType = "Required" + // attempt resolution, rewrite if successful + AttemptRewrite ImageResolutionType = "AttemptRewrite" + // attempt resolution, don't rewrite + Attempt ImageResolutionType = "Attempt" + // don't attempt resolution + DoNotAttempt ImageResolutionType = "DoNotAttempt" +) + +// ImageResolutionPolicyRule describes resolution rules based on resource. +type ImageResolutionPolicyRule struct { + // Policy controls whether resolution will happen if the rule doesn't match local names. This value + // overrides the global image policy for all target resources. + Policy ImageResolutionType `json:"policy"` + // TargetResource is the identified group and resource. If Resource is *, this rule will apply + // to all resources in that group. + TargetResource metav1.GroupResource `json:"targetResource"` + // LocalNames will allow single segment names to be interpreted as namespace local image + // stream tags, but only if the target image stream tag has the "resolveLocalNames" field + // set. + LocalNames bool `json:"localNames"` +} + +// ImageExecutionPolicyRule determines whether a provided image may be used on the platform. +type ImageExecutionPolicyRule struct { + ImageCondition `json:",inline"` + + // Reject means this rule, if it matches the condition, will cause an immediate failure. No + // other rules will be considered. + Reject bool `json:"reject"` +} + +// ImageCondition defines the conditions for matching a particular image source. The conditions below +// are all required (logical AND). If Reject is specified, the condition is false if all conditions match, +// and true otherwise. +type ImageCondition struct { + // Name is the name of this policy rule for reference. It must be unique across all rules. + Name string `json:"name"` + // IgnoreNamespaceOverride prevents this condition from being overridden when the + // `alpha.image.policy.openshift.io/ignore-rules` is set on a namespace and contains this rule name. + IgnoreNamespaceOverride bool `json:"ignoreNamespaceOverride"` + + // OnResources determines which resources this applies to. 
Defaults to 'pods' for ImageExecutionPolicyRules. + OnResources []metav1.GroupResource `json:"onResources"` + + // InvertMatch means the value of the condition is logically inverted (true -> false, false -> true). + InvertMatch bool `json:"invertMatch"` + + // MatchIntegratedRegistry will only match image sources that originate from the configured integrated + // registry. + MatchIntegratedRegistry bool `json:"matchIntegratedRegistry"` + // MatchRegistries will match image references that point to the provided registries. The image registry + // must match at least one of these strings. + MatchRegistries []string `json:"matchRegistries"` + + // SkipOnResolutionFailure allows the subsequent conditions to be bypassed if the integrated registry does + // not have access to image metadata (no image exists matching the image digest). + SkipOnResolutionFailure bool `json:"skipOnResolutionFailure"` + + // MatchDockerImageLabels checks against the resolved image for the presence of a Docker label. All + // conditions must match. + MatchDockerImageLabels []ValueCondition `json:"matchDockerImageLabels"` + // MatchImageLabels checks against the resolved image for a label. All conditions must match. + MatchImageLabels []metav1.LabelSelector `json:"matchImageLabels"` + // MatchImageLabelSelectors is the processed form of MatchImageLabels. All conditions must match. + // TODO: this only existed on the internal type, it's set as part of processing the configuration, + // so presumably it should not be supplied by the user. Not sure the best way to deal with it. + MatchImageLabelSelectors []labels.Selector `json:"-"` + // MatchImageAnnotations checks against the resolved image for an annotation. All conditions must match. + MatchImageAnnotations []ValueCondition `json:"matchImageAnnotations"` +} + +// ValueCondition reflects whether the following key in a map is set or has a given value. +type ValueCondition struct { + // Key is the name of a key in a map to retrieve. + Key string `json:"key"` + // Set indicates the provided key exists in the map. This field is exclusive with Value. + Set bool `json:"set"` + // Value indicates the provided key has the given value. This field is exclusive with Set. + Value string `json:"value"` +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..5d3f232521fab --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/zz_generated.deepcopy.go @@ -0,0 +1,151 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageCondition) DeepCopyInto(out *ImageCondition) { + *out = *in + if in.OnResources != nil { + in, out := &in.OnResources, &out.OnResources + *out = make([]metav1.GroupResource, len(*in)) + copy(*out, *in) + } + if in.MatchRegistries != nil { + in, out := &in.MatchRegistries, &out.MatchRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchDockerImageLabels != nil { + in, out := &in.MatchDockerImageLabels, &out.MatchDockerImageLabels + *out = make([]ValueCondition, len(*in)) + copy(*out, *in) + } + if in.MatchImageLabels != nil { + in, out := &in.MatchImageLabels, &out.MatchImageLabels + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchImageLabelSelectors != nil { + in, out := &in.MatchImageLabelSelectors, &out.MatchImageLabelSelectors + *out = make([]labels.Selector, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopySelector() + } + } + } + if in.MatchImageAnnotations != nil { + in, out := &in.MatchImageAnnotations, &out.MatchImageAnnotations + *out = make([]ValueCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageCondition. +func (in *ImageCondition) DeepCopy() *ImageCondition { + if in == nil { + return nil + } + out := new(ImageCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageExecutionPolicyRule) DeepCopyInto(out *ImageExecutionPolicyRule) { + *out = *in + in.ImageCondition.DeepCopyInto(&out.ImageCondition) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageExecutionPolicyRule. +func (in *ImageExecutionPolicyRule) DeepCopy() *ImageExecutionPolicyRule { + if in == nil { + return nil + } + out := new(ImageExecutionPolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ResolutionRules != nil { + in, out := &in.ResolutionRules, &out.ResolutionRules + *out = make([]ImageResolutionPolicyRule, len(*in)) + copy(*out, *in) + } + if in.ExecutionRules != nil { + in, out := &in.ExecutionRules, &out.ExecutionRules + *out = make([]ImageExecutionPolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig. +func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig { + if in == nil { + return nil + } + out := new(ImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicyConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageResolutionPolicyRule) DeepCopyInto(out *ImageResolutionPolicyRule) { + *out = *in + out.TargetResource = in.TargetResource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageResolutionPolicyRule. 
+func (in *ImageResolutionPolicyRule) DeepCopy() *ImageResolutionPolicyRule { + if in == nil { + return nil + } + out := new(ImageResolutionPolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueCondition) DeepCopyInto(out *ValueCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueCondition. +func (in *ValueCondition) DeepCopy() *ValueCondition { + if in == nil { + return nil + } + out := new(ValueCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/validation/validation.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/validation/validation.go new file mode 100644 index 0000000000000..ecc0756ed18e1 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/validation/validation.go @@ -0,0 +1,55 @@ +package validation + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + + imagepolicy "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" +) + +func Validate(config *imagepolicy.ImagePolicyConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + return allErrs + } + names := sets.NewString() + for i, rule := range config.ExecutionRules { + if names.Has(rule.Name) { + allErrs = append(allErrs, field.Duplicate(field.NewPath(imagepolicy.PluginName, "executionRules").Index(i).Child("name"), rule.Name)) + } + names.Insert(rule.Name) + for j, selector := range rule.MatchImageLabels { + _, err := metav1.LabelSelectorAsSelector(&selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath(imagepolicy.PluginName, "executionRules").Index(i).Child("matchImageLabels").Index(j), nil, err.Error())) + } + } + } + + for i, rule := range config.ResolutionRules { + if len(rule.Policy) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath(imagepolicy.PluginName, "resolutionRules").Index(i).Child("policy"), "a policy must be specified for this resource")) + } + if len(rule.TargetResource.Resource) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath(imagepolicy.PluginName, "resolutionRules").Index(i).Child("targetResource", "resource"), "a target resource name or '*' must be provided")) + } + } + + // if you don't attempt resolution, you'll never be able to pass any rule that logically requires it + if config.ResolveImages == imagepolicy.DoNotAttempt { + for i, rule := range config.ExecutionRules { + if len(rule.MatchDockerImageLabels) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(imagepolicy.PluginName, "executionRules").Index(i).Child("matchDockerImageLabels"), rule.MatchDockerImageLabels, "images are not being resolved, this condition will always fail")) + } + if len(rule.MatchImageLabels) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(imagepolicy.PluginName, "executionRules").Index(i).Child("matchImageLabels"), rule.MatchImageLabels, "images are not being resolved, this condition will always fail")) + } + if len(rule.MatchImageAnnotations) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(imagepolicy.PluginName, 
"executionRules").Index(i).Child("matchImageAnnotations"), rule.MatchImageAnnotations, "images are not being resolved, this condition will always fail")) + } + } + } + + return allErrs +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/helpers.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/helpers.go new file mode 100644 index 0000000000000..91c1cfe9b169e --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/helpers.go @@ -0,0 +1,32 @@ +package imagepolicy + +import ( + imagepolicyapiv1 "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" +) + +// RequestsResolution returns true if you should attempt to resolve image pull specs +func RequestsResolution(imageResolutionType imagepolicyapiv1.ImageResolutionType) bool { + switch imageResolutionType { + case imagepolicyapiv1.RequiredRewrite, imagepolicyapiv1.Required, imagepolicyapiv1.AttemptRewrite, imagepolicyapiv1.Attempt: + return true + } + return false +} + +// FailOnResolutionFailure returns true if you should fail when resolution fails +func FailOnResolutionFailure(imageResolutionType imagepolicyapiv1.ImageResolutionType) bool { + switch imageResolutionType { + case imagepolicyapiv1.RequiredRewrite, imagepolicyapiv1.Required: + return true + } + return false +} + +// RewriteImagePullSpec returns true if you should rewrite image pull specs when resolution succeeds +func RewriteImagePullSpec(imageResolutionType imagepolicyapiv1.ImageResolutionType) bool { + switch imageResolutionType { + case imagepolicyapiv1.RequiredRewrite, imagepolicyapiv1.AttemptRewrite: + return true + } + return false +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go new file mode 100644 index 0000000000000..909531f7ec04e --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go @@ -0,0 +1,517 @@ +package imagepolicy + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + "time" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/diff" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/utils/lru" + + imagev1 "github.com/openshift/api/image/v1" + imagepolicy "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/validation" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules" + imagev1client "github.com/openshift/client-go/image/clientset/versioned" + imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + 
"github.com/openshift/library-go/pkg/image/imageutil" + "github.com/openshift/library-go/pkg/image/reference" +) + +func Register(plugins *admission.Plugins) { + plugins.Register(imagepolicy.PluginName, + func(input io.Reader) (admission.Interface, error) { + config := &imagepolicy.ImagePolicyConfig{} + if input != nil { + configContent, err := ioutil.ReadAll(input) + if err != nil { + return nil, err + } + scheme := runtime.NewScheme() + utilruntime.Must(imagepolicy.Install(scheme)) + codecs := serializer.NewCodecFactory(scheme) + err = runtime.DecodeInto(codecs.UniversalDecoder(imagepolicy.GroupVersion), configContent, config) + if err != nil { + return nil, err + } + } + + imagepolicy.SetDefaults_ImagePolicyConfig(config) + if errs := validation.Validate(config); len(errs) > 0 { + return nil, errs.ToAggregate() + } + klog.V(5).Infof("%s admission controller loaded with config: %#v", imagepolicy.PluginName, config) + return NewImagePolicyPlugin(config) + }) +} + +type ImagePolicyPlugin struct { + *admission.Handler + config *imagepolicy.ImagePolicyConfig + Client imagev1client.Interface + + accepter rules.Accepter + + integratedRegistryMatcher integratedRegistryMatcher + + NsLister corev1listers.NamespaceLister + resolver imageResolver + + imageMutators imagereferencemutators.ImageMutators +} + +var _ = initializer.WantsExternalKubeInformerFactory(&ImagePolicyPlugin{}) +var _ = admissionrestconfig.WantsRESTClientConfig(&ImagePolicyPlugin{}) +var _ = WantsInternalImageRegistry(&ImagePolicyPlugin{}) +var _ = WantsImageMutators(&ImagePolicyPlugin{}) +var _ = admission.ValidationInterface(&ImagePolicyPlugin{}) +var _ = admission.MutationInterface(&ImagePolicyPlugin{}) + +type integratedRegistryMatcher struct { + rules.RegistryMatcher +} + +// imageResolver abstracts identifying an image for a particular reference. +type imageResolver interface { + ResolveObjectReference(ref *kapi.ObjectReference, defaultNamespace string, forceResolveLocalNames bool) (*rules.ImagePolicyAttributes, error) +} + +// imageResolutionPolicy determines whether an image should be resolved +type imageResolutionPolicy interface { + // RequestsResolution returns true if you should attempt to resolve image pull specs + RequestsResolution(metav1.GroupResource) bool + // FailOnResolutionFailure returns true if you should fail when resolution fails + FailOnResolutionFailure(metav1.GroupResource) bool + // RewriteImagePullSpec returns true if you should rewrite image pull specs when resolution succeeds + RewriteImagePullSpec(attr *rules.ImagePolicyAttributes, isUpdate bool, gr metav1.GroupResource) bool +} + +// ImagePolicyPlugin returns an admission controller for pods that controls what images are allowed to run on the +// cluster. 
+func NewImagePolicyPlugin(parsed *imagepolicy.ImagePolicyConfig) (*ImagePolicyPlugin, error) { + m := integratedRegistryMatcher{ + RegistryMatcher: rules.NewRegistryMatcher(nil), + } + accepter, err := rules.NewExecutionRulesAccepter(parsed.ExecutionRules, m) + if err != nil { + return nil, err + } + + return &ImagePolicyPlugin{ + Handler: admission.NewHandler(admission.Create, admission.Update), + config: parsed, + + accepter: accepter, + + integratedRegistryMatcher: m, + }, nil +} + +func (a *ImagePolicyPlugin) SetInternalImageRegistry(internalImageRegistryName string) { + a.integratedRegistryMatcher.RegistryMatcher = rules.RegistryNameMatcher(internalImageRegistryName) +} + +func (a *ImagePolicyPlugin) SetImageMutators(imageMutators imagereferencemutators.ImageMutators) { + a.imageMutators = imageMutators +} + +func (a *ImagePolicyPlugin) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + a.Client, err = imagev1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (a *ImagePolicyPlugin) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + a.NsLister = kubeInformers.Core().V1().Namespaces().Lister() +} + +// ValidateInitialization ensures that all required interfaces have been provided, or returns an error. +func (a *ImagePolicyPlugin) ValidateInitialization() error { + if a.Client == nil { + return fmt.Errorf("%s needs an OpenShift client", imagepolicy.PluginName) + } + if a.NsLister == nil { + return fmt.Errorf("%s needs a namespace lister", imagepolicy.PluginName) + } + if a.imageMutators == nil { + return fmt.Errorf("%s needs image mutators", imagepolicy.PluginName) + } + a.resolver = newImageResolutionCache(a.Client.ImageV1(), a.integratedRegistryMatcher) + return nil +} + +// Admit attempts to apply the image policy to the incoming resource. +func (a *ImagePolicyPlugin) Admit(ctx context.Context, attr admission.Attributes, _ admission.ObjectInterfaces) error { + return a.admit(ctx, attr, true) +} + +// Validate attempts to apply the image policy to the incoming resource.
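+// Mutation is not allowed in this phase; if resolution would change an image reference after
+// admission, the object is rejected (see mutationPreventer below).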
+func (a *ImagePolicyPlugin) Validate(ctx context.Context, attr admission.Attributes, _ admission.ObjectInterfaces) error { + return a.admit(ctx, attr, false) +} + +func (a *ImagePolicyPlugin) admit(ctx context.Context, attr admission.Attributes, mutationAllowed bool) error { + switch attr.GetOperation() { + case admission.Create, admission.Update: + if len(attr.GetSubresource()) > 0 { + return nil + } + // only create and update are tested, and only on core resources + // TODO: scan all resources + // TODO: Create a general equivalence map for admission - operation X on subresource Y is equivalent to reduced operation + default: + return nil + } + + policy := resolutionConfig{a.config} + + schemagr := attr.GetResource().GroupResource() + apigr := metav1.GroupResource{Resource: schemagr.Resource, Group: schemagr.Group} + + if !a.accepter.Covers(apigr) && !policy.Covers(apigr) { + return nil + } + + if obj, ok := attr.GetObject().(metav1.Object); ok { + for _, ownerRef := range obj.GetOwnerReferences() { + if ownerRef.Controller != nil && *ownerRef.Controller { + klog.V(5).Infof("skipping image policy admission for %s:%s/%s, reason: has controller owner reference", attr.GetKind(), attr.GetNamespace(), attr.GetName()) + return nil + } + } + } + + klog.V(5).Infof("running image policy admission for %s:%s/%s", attr.GetKind(), attr.GetNamespace(), attr.GetName()) + m, err := a.imageMutators.GetImageReferenceMutator(attr.GetObject(), attr.GetOldObject()) + if err != nil { + return apierrs.NewForbidden(schemagr, attr.GetName(), fmt.Errorf("unable to apply image policy against objects of type %T: %v", attr.GetObject(), err)) + } + + if !mutationAllowed { + m = &mutationPreventer{m} + } + + annotations, _ := a.imageMutators.GetAnnotationAccessor(attr.GetObject()) + + // load exclusion rules from the namespace cache + var excluded sets.String + if ns := attr.GetNamespace(); len(ns) > 0 { + if ns, err := a.NsLister.Get(ns); err == nil { + if value := ns.Annotations[imagepolicy.IgnorePolicyRulesAnnotation]; len(value) > 0 { + excluded = sets.NewString(strings.Split(value, ",")...) + } + } + } + + if err := accept(a.accepter, policy, a.resolver, m, annotations, attr, excluded, mutationAllowed); err != nil { + return err + } + + return nil +} + +type mutationPreventer struct { + m imagereferencemutators.ImageReferenceMutator +} + +func (m *mutationPreventer) Mutate(fn imagereferencemutators.ImageReferenceMutateFunc) field.ErrorList { + return m.m.Mutate(func(ref *kapi.ObjectReference) error { + original := ref.DeepCopy() + if err := fn(ref); err != nil { + return fmt.Errorf("error in image policy validation: %v", err) + } + if !reflect.DeepEqual(ref, original) { + klog.V(2).Infof("disallowed mutation in image policy validation: %s", diff.ObjectGoPrintSideBySide(original, ref)) + return fmt.Errorf("this image is prohibited by policy (changed after admission)") + } + return nil + }) +} + +type imageResolutionCache struct { + imageClient imagev1typedclient.ImageV1Interface + integrated rules.RegistryMatcher + expiration time.Duration + + cache *lru.Cache +} + +type imageCacheEntry struct { + expires time.Time + image *imagev1.Image +} + +// newImageResolutionCache creates a new resolver that caches frequently loaded images for one minute. 
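+// Entries are held in a 128-entry LRU keyed by the image's digest/name and are refetched once
+// they pass the expiration window.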
+func newImageResolutionCache(imageClient imagev1typedclient.ImageV1Interface, integratedRegistry rules.RegistryMatcher) *imageResolutionCache { + imageCache := lru.New(128) + return &imageResolutionCache{ + imageClient: imageClient, + integrated: integratedRegistry, + cache: imageCache, + expiration: time.Minute, + } +} + +var now = time.Now + +// ResolveObjectReference converts a reference into an image API or returns an error. If the kind is not recognized +// this method will return an error to prevent references that may be images from being ignored. +func (c *imageResolutionCache) ResolveObjectReference(ref *kapi.ObjectReference, defaultNamespace string, forceResolveLocalNames bool) (*rules.ImagePolicyAttributes, error) { + switch ref.Kind { + case "ImageStreamTag": + ns := ref.Namespace + if len(ns) == 0 { + ns = defaultNamespace + } + name, tag, ok := imageutil.SplitImageStreamTag(ref.Name) + if !ok { + return &rules.ImagePolicyAttributes{IntegratedRegistry: true}, fmt.Errorf("references of kind ImageStreamTag must be of the form NAME:TAG") + } + return c.resolveImageStreamTag(ns, name, tag, false, false) + + case "ImageStreamImage": + ns := ref.Namespace + if len(ns) == 0 { + ns = defaultNamespace + } + name, id, ok := imageutil.SplitImageStreamImage(ref.Name) + if !ok { + return &rules.ImagePolicyAttributes{IntegratedRegistry: true}, fmt.Errorf("references of kind ImageStreamImage must be of the form NAME@DIGEST") + } + return c.resolveImageStreamImage(ns, name, id) + + case "DockerImage": + ref, err := reference.Parse(ref.Name) + if err != nil { + return nil, err + } + return c.resolveImageReference(ref, defaultNamespace, forceResolveLocalNames) + + default: + return nil, fmt.Errorf("image policy does not allow image references of kind %q", ref.Kind) + } +} + +// resolveImageReference converts an image reference into a resolved image or returns an error. Only images located in the internal +// registry or those with a digest can be resolved - all other scenarios will return an error. +func (c *imageResolutionCache) resolveImageReference(ref reference.DockerImageReference, defaultNamespace string, forceResolveLocalNames bool) (*rules.ImagePolicyAttributes, error) { + // images by ID can be checked for policy + if len(ref.ID) > 0 { + now := now() + if value, ok := c.cache.Get(ref.ID); ok { + cached := value.(imageCacheEntry) + if now.Before(cached.expires) { + return &rules.ImagePolicyAttributes{Name: ref, Image: cached.image}, nil + } + } + image, err := c.imageClient.Images().Get(context.TODO(), ref.ID, metav1.GetOptions{}) + if err != nil { + return nil, err + } + c.cache.Add(ref.ID, imageCacheEntry{expires: now.Add(c.expiration), image: image}) + return &rules.ImagePolicyAttributes{Name: ref, Image: image, IntegratedRegistry: c.integrated.Matches(ref.Registry)}, nil + } + + // an image spec that points to the internal registry is by definition also an imagestreamtag reference, + // so attempt to resolve it as such. + fullReference := c.integrated.Matches(ref.Registry) + // if we've been explicitly told to treat this image spec as an imagestreamtag reference, or if it is a single + // segment value, attempt to resolve the value as an imagestream tag that will ultimately resolve to an image. + partialReference := forceResolveLocalNames || (len(ref.Registry) == 0 && len(ref.Namespace) == 0 && len(ref.Name) > 0) + + // if we can't treat it as an imagestreamtag reference, and since we don't have an imageid (checked earlier), + // we aren't going to be able to resolve this value to an image.
+ if !fullReference && !partialReference { + return nil, fmt.Errorf("(%s) could not be resolved to an exact image reference", ref.Exact()) + } + + tag := ref.Tag + if len(tag) == 0 { + tag = imagev1.DefaultImageTag + } + if len(ref.Namespace) == 0 || forceResolveLocalNames { + ref.Namespace = defaultNamespace + } + + return c.resolveImageStreamTag(ref.Namespace, ref.Name, tag, partialReference, forceResolveLocalNames) +} + +// resolveImageStreamTag loads an image stream tag and creates a fully qualified image stream image reference, +// or returns an error. +func (c *imageResolutionCache) resolveImageStreamTag(namespace, name, tag string, partial, forceResolveLocalNames bool) (*rules.ImagePolicyAttributes, error) { + attrs := &rules.ImagePolicyAttributes{IntegratedRegistry: true} + resolved, err := c.imageClient.ImageStreamTags(namespace).Get(context.TODO(), imageutil.JoinImageStreamTag(name, tag), metav1.GetOptions{}) + if err != nil { + if partial { + attrs.IntegratedRegistry = false + } + // if a stream exists, resolves names, and a registry is installed, change the reference to be a pointer + // to the internal registry. This prevents the lookup from going to the original location, which is consistent + // with the intent of resolving local names. + if isImageStreamTagNotFound(err) { + if stream, err := c.imageClient.ImageStreams(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err == nil && (forceResolveLocalNames || stream.Spec.LookupPolicy.Local) && len(stream.Status.DockerImageRepository) > 0 { + if ref, err := reference.Parse(stream.Status.DockerImageRepository); err == nil { + klog.V(4).Infof("%s/%s:%s points to a local name resolving stream, but the tag does not exist", namespace, name, tag) + ref.Tag = tag + attrs.Name = ref + attrs.LocalRewrite = true + return attrs, nil + } + } + } + return attrs, err + } + if partial { + if !forceResolveLocalNames && !resolved.LookupPolicy.Local { + attrs.IntegratedRegistry = false + return attrs, fmt.Errorf("ImageStreamTag does not allow local references and the resource did not request image stream resolution") + } + attrs.LocalRewrite = true + } + ref, err := reference.Parse(resolved.Image.DockerImageReference) + if err != nil { + return attrs, fmt.Errorf("image reference %s could not be parsed: %v", resolved.Image.DockerImageReference, err) + } + ref.Tag = "" + ref.ID = resolved.Image.Name + + now := now() + c.cache.Add(resolved.Image.Name, imageCacheEntry{expires: now.Add(c.expiration), image: &resolved.Image}) + + attrs.Name = ref + attrs.Image = &resolved.Image + return attrs, nil +} + +// resolveImageStreamImage loads an image stream image if it exists, or returns an error. +func (c *imageResolutionCache) resolveImageStreamImage(namespace, name, id string) (*rules.ImagePolicyAttributes, error) { + attrs := &rules.ImagePolicyAttributes{IntegratedRegistry: true} + resolved, err := c.imageClient.ImageStreamImages(namespace).Get(context.TODO(), imageutil.JoinImageStreamImage(name, id), metav1.GetOptions{}) + if err != nil { + return attrs, err + } + ref, err := reference.Parse(resolved.Image.DockerImageReference) + if err != nil { + return attrs, fmt.Errorf("ImageStreamTag could not be resolved: %v", err) + } + now := now() + c.cache.Add(resolved.Image.Name, imageCacheEntry{expires: now.Add(c.expiration), image: &resolved.Image}) + + attrs.Name = ref + attrs.Image = &resolved.Image + return attrs, nil +} + +// isImageStreamTagNotFound returns true iff the tag is missing but the image stream +// exists. 
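+// The check inspects the structured details of the returned NotFound error rather than matching
+// on its message text.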
+func isImageStreamTagNotFound(err error) bool { + if err == nil || !apierrs.IsNotFound(err) { + return false + } + status, ok := err.(apierrs.APIStatus) + if !ok { + return false + } + details := status.Status().Details + if details == nil { + return false + } + return details.Kind == "imagestreamtags" && details.Group == "image.openshift.io" +} + +// resolutionConfig translates an ImagePolicyConfig into imageResolutionPolicy +type resolutionConfig struct { + config *imagepolicy.ImagePolicyConfig +} + +// Covers returns true if the resolver specifically should touch this resource. +func (config resolutionConfig) Covers(gr metav1.GroupResource) bool { + for _, rule := range config.config.ResolutionRules { + if resolutionRuleCoversResource(rule.TargetResource, gr) { + return true + } + } + return false +} + +// RequestsResolution is true if the policy demands it or if any rule covers it. +func (config resolutionConfig) RequestsResolution(gr metav1.GroupResource) bool { + if RequestsResolution(config.config.ResolveImages) { + return true + } + for _, rule := range config.config.ResolutionRules { + if resolutionRuleCoversResource(rule.TargetResource, gr) { + return true + } + } + return false +} + +// FailOnResolutionFailure does not depend on the nested rules. +func (config resolutionConfig) FailOnResolutionFailure(gr metav1.GroupResource) bool { + return FailOnResolutionFailure(config.config.ResolveImages) +} + +var skipImageRewriteOnUpdate = map[metav1.GroupResource]struct{}{ + // Job template specs are immutable, they cannot be updated. + {Group: "batch", Resource: "jobs"}: {}, + // Build specs are immutable, they cannot be updated. + {Group: "build.openshift.io", Resource: "builds"}: {}, +} + +// RewriteImagePullSpec applies to implicit rewrite attributes and local resources, as well as when the policy requires it. +// If a local name check is requested and a rule matches, true is returned. The policy default resolution is only respected +// if a resource isn't covered by a rule - if pods have a rule with DoNotAttempt and the global policy is RequiredRewrite, +// no pods will be rewritten.
+func (config resolutionConfig) RewriteImagePullSpec(attr *rules.ImagePolicyAttributes, isUpdate bool, gr metav1.GroupResource) bool { + if isUpdate { + if _, ok := skipImageRewriteOnUpdate[gr]; ok { + return false + } + } + hasMatchingRule := false + for _, rule := range config.config.ResolutionRules { + if !resolutionRuleCoversResource(rule.TargetResource, gr) { + continue + } + if rule.LocalNames && attr.LocalRewrite { + return true + } + if RewriteImagePullSpec(rule.Policy) { + return true + } + hasMatchingRule = true + } + if hasMatchingRule { + return false + } + return RewriteImagePullSpec(config.config.ResolveImages) +} + +// resolutionRuleCoversResource implements wildcard checking on Resource names +func resolutionRuleCoversResource(rule metav1.GroupResource, gr metav1.GroupResource) bool { + return rule.Group == gr.Group && (rule.Resource == gr.Resource || rule.Resource == "*") +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/interfaces.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/interfaces.go new file mode 100644 index 0000000000000..47f6f9cfb1de5 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/interfaces.go @@ -0,0 +1,49 @@ +package imagereferencemutators + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +type ImageMutators interface { + GetImageReferenceMutator(obj, old runtime.Object) (ImageReferenceMutator, error) + GetAnnotationAccessor(obj runtime.Object) (AnnotationAccessor, bool) +} + +// ImageReferenceMutateFunc is passed a reference representing an image, and may alter +// the Name, Kind, and Namespace fields of the reference. If an error is returned the +// object may still be mutated under the covers. +type ImageReferenceMutateFunc func(ref *kapi.ObjectReference) error + +type ImageReferenceMutator interface { + // Mutate invokes fn on every image reference in the object. If fn returns an error, + // a field.Error is added to the list to be returned. Mutate does not terminate early + // if errors are detected. + Mutate(fn ImageReferenceMutateFunc) field.ErrorList +} + +type AnnotationAccessor interface { + // Annotations returns a map representing annotations. Not mutable. + Annotations() map[string]string + // SetAnnotations sets representing annotations onto the object. + SetAnnotations(map[string]string) + // TemplateAnnotations returns a map representing annotations on a nested template in the object. Not mutable. + // If no template is present bool will be false. + TemplateAnnotations() (map[string]string, bool) + // SetTemplateAnnotations sets annotations on a nested template in the object. + // If no template is present bool will be false. 
+ SetTemplateAnnotations(map[string]string) bool +} + +type ContainerMutator interface { + GetName() string + GetImage() string + SetImage(image string) +} + +type PodSpecReferenceMutator interface { + GetContainerByIndex(init bool, i int) (ContainerMutator, bool) + GetContainerByName(name string) (ContainerMutator, bool) + GetPath() *field.Path +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/meta.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/meta.go new file mode 100644 index 0000000000000..1b606e5609c83 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/meta.go @@ -0,0 +1,105 @@ +package imagereferencemutators + +import ( + "fmt" + + kapiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kapi "k8s.io/kubernetes/pkg/apis/core" + + imagepolicyv1 "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" +) + +type KubeImageMutators struct{} + +var errNoImageMutator = fmt.Errorf("No list of images available for this object") + +func ResolveAllNames(annotations AnnotationAccessor) bool { + if annotations == nil { + return false + } + + a, ok := annotations.TemplateAnnotations() + if ok && a[imagepolicyv1.ResolveNamesAnnotation] == "*" { + return true + } + + a = annotations.Annotations() + if a != nil && a[imagepolicyv1.ResolveNamesAnnotation] == "*" { + return true + } + + return false +} + +// GetImageReferenceMutator returns a mutator for the provided object, or an error if no +// such mutator is defined. Only references that are different between obj and old will +// be returned unless old is nil. 
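+// A minimal usage sketch (pod, oldPod, and resolve are placeholders assumed
+// to be supplied by the calling admission plugin):
+//
+//	mutator, err := KubeImageMutators{}.GetImageReferenceMutator(pod, oldPod)
+//	if err != nil {
+//		return err // the object carries no pod spec
+//	}
+//	fieldErrs := mutator.Mutate(func(ref *kapi.ObjectReference) error {
+//		ref.Name = resolve(ref.Name)
+//		return nil
+//	})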
+func (m KubeImageMutators) GetImageReferenceMutator(obj, old runtime.Object) (ImageReferenceMutator, error) { + oldAnnotations, _ := m.GetAnnotationAccessor(old) + annotations, _ := m.GetAnnotationAccessor(obj) + resolveAnnotationChanged := ResolveAllNames(annotations) != ResolveAllNames(oldAnnotations) + + if spec, path, err := GetPodSpec(obj); err == nil { + var oldSpec *kapi.PodSpec + if old != nil { + oldSpec, _, err = GetPodSpec(old) + if err != nil { + return nil, fmt.Errorf("old and new pod spec objects were not of the same type %T != %T: %v", obj, old, err) + } + } + return NewPodSpecMutator(spec, oldSpec, path, resolveAnnotationChanged), nil + } + if spec, path, err := GetPodSpecV1(obj); err == nil { + var oldSpec *kapiv1.PodSpec + if old != nil { + oldSpec, _, err = GetPodSpecV1(old) + if err != nil { + return nil, fmt.Errorf("old and new pod spec objects were not of the same type %T != %T: %v", obj, old, err) + } + } + return NewPodSpecV1Mutator(spec, oldSpec, path, resolveAnnotationChanged), nil + } + return nil, errNoImageMutator +} + +type annotationsAccessor struct { + object metav1.Object + template metav1.Object +} + +func (a annotationsAccessor) Annotations() map[string]string { + return a.object.GetAnnotations() +} + +func (a annotationsAccessor) TemplateAnnotations() (map[string]string, bool) { + if a.template == nil { + return nil, false + } + return a.template.GetAnnotations(), true +} + +func (a annotationsAccessor) SetAnnotations(annotations map[string]string) { + a.object.SetAnnotations(annotations) +} + +func (a annotationsAccessor) SetTemplateAnnotations(annotations map[string]string) bool { + if a.template == nil { + return false + } + a.template.SetAnnotations(annotations) + return true +} + +// GetAnnotationAccessor returns an accessor for the provided object or false if the object +// does not support accessing annotations. +func (KubeImageMutators) GetAnnotationAccessor(obj runtime.Object) (AnnotationAccessor, bool) { + switch t := obj.(type) { + case metav1.Object: + templateObject, _ := GetTemplateMetaObject(obj) + return annotationsAccessor{object: t, template: templateObject}, true + default: + return nil, false + } +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/pods.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/pods.go new file mode 100644 index 0000000000000..81b374f72d450 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators/pods.go @@ -0,0 +1,433 @@ +package imagereferencemutators + +import ( + "fmt" + + kappsv1 "k8s.io/api/apps/v1" + kappsv1beta1 "k8s.io/api/apps/v1beta1" + kappsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/core" +) + +var errNoPodSpec = fmt.Errorf("No PodSpec available for this object") + +// GetPodSpec returns a mutable pod spec out of the provided object, including a field path +// to the field in the object, or an error if the object does not contain a pod spec. +// This only returns internal objects. 
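+// For example, a *core.Pod yields its spec at path "spec", while a
+// *batch.CronJob nests the pod template two levels down and yields
+// "spec.jobTemplate.spec.template.spec".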
+func GetPodSpec(obj runtime.Object) (*core.PodSpec, *field.Path, error) { + switch r := obj.(type) { + case *core.Pod: + return &r.Spec, field.NewPath("spec"), nil + case *core.PodTemplate: + return &r.Template.Spec, field.NewPath("template", "spec"), nil + case *core.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + case *apps.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *apps.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *apps.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *batch.Job: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *batch.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + case *apps.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + return nil, nil, errNoPodSpec +} + +// GetPodSpecV1 returns a mutable pod spec out of the provided object, including a field path +// to the field in the object, or an error if the object does not contain a pod spec. +// This only returns pod specs for v1 compatible objects. +func GetPodSpecV1(obj runtime.Object) (*corev1.PodSpec, *field.Path, error) { + switch r := obj.(type) { + + case *corev1.Pod: + return &r.Spec, field.NewPath("spec"), nil + + case *corev1.PodTemplate: + return &r.Template.Spec, field.NewPath("template", "spec"), nil + + case *corev1.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + + case *extensionsv1beta1.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *extensionsv1beta1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *extensionsv1beta1.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *batchv1.Job: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *batchv1beta1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + case *batchv1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + + case *kappsv1.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta1.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.StatefulSet: + return 
&r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + return nil, nil, errNoPodSpec +} + +// GetTemplateMetaObject returns a mutable metav1.Object interface for the template +// the object contains, or false if no such object is available. +func GetTemplateMetaObject(obj runtime.Object) (metav1.Object, bool) { + switch r := obj.(type) { + + case *corev1.PodTemplate: + return &r.Template.ObjectMeta, true + + case *corev1.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.ObjectMeta, true + } + + case *extensionsv1beta1.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + + case *extensionsv1beta1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.Deployment: + return &r.Spec.Template.ObjectMeta, true + + case *extensionsv1beta1.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + + case *batchv1.Job: + return &r.Spec.Template.ObjectMeta, true + + case *batchv1beta1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + case *batchv1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + + case *kappsv1.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta1.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + + case *core.PodTemplate: + return &r.Template.ObjectMeta, true + case *core.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.ObjectMeta, true + } + case *apps.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *apps.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *apps.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *batch.Job: + return &r.Spec.Template.ObjectMeta, true + case *batch.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + case *apps.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + } + return nil, false +} + +type containerMutator struct { + *core.Container +} + +func (m containerMutator) GetName() string { return m.Name } +func (m containerMutator) GetImage() string { return m.Image } +func (m containerMutator) SetImage(image string) { m.Image = image } + +type containerV1Mutator struct { + *corev1.Container +} + +func (m containerV1Mutator) GetName() string { return m.Name } +func (m containerV1Mutator) GetImage() string { return m.Image } +func (m containerV1Mutator) SetImage(image string) { m.Image = image } + +// podSpecMutator implements the mutation interface over objects with a pod spec. 
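+// On update, containers whose image is unchanged relative to oldSpec are left
+// alone (see hasIdenticalPodSpecImage below) unless the resolve-names
+// annotation itself changed, in which case every container is re-evaluated.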
+type podSpecMutator struct { + spec *core.PodSpec + oldSpec *core.PodSpec + path *field.Path + resolveAnnotationChanged bool +} + +func NewPodSpecMutator(spec *core.PodSpec, oldSpec *core.PodSpec, path *field.Path, resolveAnnotationChanged bool) *podSpecMutator { + return &podSpecMutator{ + spec: spec, + oldSpec: oldSpec, + path: path, + resolveAnnotationChanged: resolveAnnotationChanged, + } +} + +func (m *podSpecMutator) GetPath() *field.Path { + return m.path +} + +func hasIdenticalPodSpecImage(spec *core.PodSpec, containerName, image string) bool { + if spec == nil { + return false + } + for i := range spec.InitContainers { + if spec.InitContainers[i].Name == containerName { + return spec.InitContainers[i].Image == image + } + } + for i := range spec.Containers { + if spec.Containers[i].Name == containerName { + return spec.Containers[i].Image == image + } + } + return false +} + +// Mutate applies fn to all containers and init containers. If fn changes the Kind to +// any value other than "DockerImage", an error is set on that field. +func (m *podSpecMutator) Mutate(fn ImageReferenceMutateFunc) field.ErrorList { + var errs field.ErrorList + for i := range m.spec.InitContainers { + container := &m.spec.InitContainers[i] + if !m.resolveAnnotationChanged && hasIdenticalPodSpecImage(m.oldSpec, container.Name, container.Image) { + continue + } + ref := core.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, FieldErrorOrInternal(err, m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, FieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + for i := range m.spec.Containers { + container := &m.spec.Containers[i] + if !m.resolveAnnotationChanged && hasIdenticalPodSpecImage(m.oldSpec, container.Name, container.Image) { + continue + } + ref := core.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, FieldErrorOrInternal(err, m.path.Child("containers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, FieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("containers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + return errs +} + +func (m *podSpecMutator) GetContainerByName(name string) (ContainerMutator, bool) { + spec := m.spec + for i := range spec.InitContainers { + if name != spec.InitContainers[i].Name { + continue + } + return containerMutator{&spec.InitContainers[i]}, true + } + for i := range spec.Containers { + if name != spec.Containers[i].Name { + continue + } + return containerMutator{&spec.Containers[i]}, true + } + return nil, false +} + +func (m *podSpecMutator) GetContainerByIndex(init bool, i int) (ContainerMutator, bool) { + var container *core.Container + spec := m.spec + if init { + if i < 0 || i >= len(spec.InitContainers) { + return nil, false + } + container = &spec.InitContainers[i] + } else { + if i < 0 || i >= len(spec.Containers) { + return nil, false + } + container = &spec.Containers[i] + } + return containerMutator{container}, true +} + +func NewPodSpecV1Mutator(spec *corev1.PodSpec, oldSpec *corev1.PodSpec, path *field.Path, resolveAnnotationChanged bool) 
*podSpecV1Mutator { + return &podSpecV1Mutator{ + spec: spec, + oldSpec: oldSpec, + path: path, + resolveAnnotationChanged: resolveAnnotationChanged, + } +} + +// podSpecV1Mutator implements the mutation interface over objects with a pod spec. +type podSpecV1Mutator struct { + spec *corev1.PodSpec + oldSpec *corev1.PodSpec + path *field.Path + resolveAnnotationChanged bool +} + +func (m *podSpecV1Mutator) GetPath() *field.Path { + return m.path +} + +func hasIdenticalPodSpecV1Image(spec *corev1.PodSpec, containerName, image string) bool { + if spec == nil { + return false + } + for i := range spec.InitContainers { + if spec.InitContainers[i].Name == containerName { + return spec.InitContainers[i].Image == image + } + } + for i := range spec.Containers { + if spec.Containers[i].Name == containerName { + return spec.Containers[i].Image == image + } + } + return false +} + +// Mutate applies fn to all containers and init containers. If fn changes the Kind to +// any value other than "DockerImage", an error is set on that field. +func (m *podSpecV1Mutator) Mutate(fn ImageReferenceMutateFunc) field.ErrorList { + var errs field.ErrorList + for i := range m.spec.InitContainers { + container := &m.spec.InitContainers[i] + if !m.resolveAnnotationChanged && hasIdenticalPodSpecV1Image(m.oldSpec, container.Name, container.Image) { + continue + } + ref := core.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, FieldErrorOrInternal(err, m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, FieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + for i := range m.spec.Containers { + container := &m.spec.Containers[i] + if !m.resolveAnnotationChanged && hasIdenticalPodSpecV1Image(m.oldSpec, container.Name, container.Image) { + continue + } + ref := core.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, FieldErrorOrInternal(err, m.path.Child("containers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, FieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("containers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + return errs +} + +func (m *podSpecV1Mutator) GetContainerByName(name string) (ContainerMutator, bool) { + spec := m.spec + for i := range spec.InitContainers { + if name != spec.InitContainers[i].Name { + continue + } + return containerV1Mutator{&spec.InitContainers[i]}, true + } + for i := range spec.Containers { + if name != spec.Containers[i].Name { + continue + } + return containerV1Mutator{&spec.Containers[i]}, true + } + return nil, false +} + +func (m *podSpecV1Mutator) GetContainerByIndex(init bool, i int) (ContainerMutator, bool) { + var container *corev1.Container + spec := m.spec + if init { + if i < 0 || i >= len(spec.InitContainers) { + return nil, false + } + container = &spec.InitContainers[i] + } else { + if i < 0 || i >= len(spec.Containers) { + return nil, false + } + container = &spec.Containers[i] + } + return containerV1Mutator{container}, true +} + +func FieldErrorOrInternal(err error, path *field.Path) *field.Error { + if ferr, ok := err.(*field.Error); ok { + if 
len(ferr.Field) == 0 {
+			ferr.Field = path.String()
+		}
+		return ferr
+	}
+	if errors.IsNotFound(err) {
+		return field.NotFound(path, err)
+	}
+	return field.InternalError(path, err)
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/intializers.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/intializers.go
new file mode 100644
index 0000000000000..a700371c8b323
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/intializers.go
@@ -0,0 +1,40 @@
+package imagepolicy
+
+import (
+	"k8s.io/apiserver/pkg/admission"
+
+	"github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators"
+)
+
+func NewInitializer(imageMutators imagereferencemutators.ImageMutators, internalImageRegistry string) admission.PluginInitializer {
+	return &localInitializer{
+		imageMutators:         imageMutators,
+		internalImageRegistry: internalImageRegistry,
+	}
+}
+
+type WantsImageMutators interface {
+	SetImageMutators(imagereferencemutators.ImageMutators)
+	admission.InitializationValidator
+}
+
+type WantsInternalImageRegistry interface {
+	SetInternalImageRegistry(string)
+	admission.InitializationValidator
+}
+
+type localInitializer struct {
+	imageMutators         imagereferencemutators.ImageMutators
+	internalImageRegistry string
+}
+
+// Initialize will check the initialization interfaces implemented by each plugin
+// and provide the appropriate initialization data.
+func (i *localInitializer) Initialize(plugin admission.Interface) {
+	if wants, ok := plugin.(WantsImageMutators); ok {
+		wants.SetImageMutators(i.imageMutators)
+	}
+	if wants, ok := plugin.(WantsInternalImageRegistry); ok {
+		wants.SetInternalImageRegistry(i.internalImageRegistry)
+	}
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules/accept.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules/accept.go
new file mode 100644
index 0000000000000..f117cb73ae36d
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules/accept.go
@@ -0,0 +1,119 @@
+package rules
+
+import (
+	"k8s.io/klog/v2"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	imagepolicy "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1"
+)
+
+type Accepter interface {
+	Covers(metav1.GroupResource) bool
+
+	Accepts(*ImagePolicyAttributes) bool
+}
+
+// mappedAccepter implements the Accepter interface for a map of group resources and accepters
+type mappedAccepter map[metav1.GroupResource]Accepter
+
+func (a mappedAccepter) Covers(gr metav1.GroupResource) bool {
+	_, ok := a[gr]
+	return ok
+}
+
+// Accepts returns true if no Accepter is registered for the group resource in attributes,
+// or if the registered Accepter also returns true.
+func (a mappedAccepter) Accepts(attr *ImagePolicyAttributes) bool {
+	accepter, ok := a[attr.Resource]
+	if !ok {
+		return true
+	}
+	return accepter.Accepts(attr)
+}
+
+type executionAccepter struct {
+	rules         []imagepolicy.ImageExecutionPolicyRule
+	covers        metav1.GroupResource
+	defaultReject bool
+
+	integratedRegistryMatcher RegistryMatcher
+}
+
+// NewExecutionRulesAccepter creates an Accepter from the provided rules.
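+// Sketch of the resulting behavior (hypothetical rule set): a single
+// accepting rule such as
+//
+//	rules := []imagepolicy.ImageExecutionPolicyRule{
+//		{ImageCondition: imagepolicy.ImageCondition{
+//			Name:                    "allow-internal",
+//			OnResources:             []metav1.GroupResource{{Resource: "pods"}},
+//			MatchIntegratedRegistry: true,
+//		}},
+//	}
+//
+// flips defaultReject to true for the covered resources, so an image must
+// match at least one non-reject rule to be admitted; if every rule were a
+// Reject rule, unmatched images would be allowed by default.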
+func NewExecutionRulesAccepter(rules []imagepolicy.ImageExecutionPolicyRule, integratedRegistryMatcher RegistryMatcher) (Accepter, error) { + mapped := make(mappedAccepter) + + for _, rule := range rules { + over, selectors, err := imageConditionInfo(&rule.ImageCondition) + if err != nil { + return nil, err + } + rule.ImageCondition.MatchImageLabelSelectors = selectors + for gr := range over { + a, ok := mapped[gr] + if !ok { + a = &executionAccepter{ + covers: gr, + integratedRegistryMatcher: integratedRegistryMatcher, + } + mapped[gr] = a + } + byResource := a.(*executionAccepter) + byResource.rules = append(byResource.rules, rule) + } + } + + for _, a := range mapped { + byResource := a.(*executionAccepter) + if len(byResource.rules) > 0 { + // if all rules are reject, the default behavior is allow + allReject := true + for _, rule := range byResource.rules { + if !rule.Reject { + allReject = false + break + } + } + byResource.defaultReject = !allReject + } + } + + return mapped, nil +} + +func (r *executionAccepter) Covers(gr metav1.GroupResource) bool { + return r.covers == gr +} + +func (r *executionAccepter) Accepts(attrs *ImagePolicyAttributes) bool { + if attrs.Resource != r.covers { + return true + } + + anyMatched := false + for _, rule := range r.rules { + klog.V(5).Infof("image policy checking rule %q", rule.Name) + if attrs.ExcludedRules.Has(rule.Name) && !rule.IgnoreNamespaceOverride { + klog.V(5).Infof("skipping because rule is excluded by namespace annotations\n") + continue + } + + // if we don't have a resolved image and we're supposed to skip the rule if that happens, + // continue here. Otherwise, the reject option is impossible to reason about. + if attrs.Image == nil && rule.SkipOnResolutionFailure { + klog.V(5).Infof("skipping because image is not resolved and skip on failure is true\n") + continue + } + + matches := matchImageCondition(&rule.ImageCondition, r.integratedRegistryMatcher, attrs) + klog.V(5).Infof("Rule %q(reject=%t) applies to image %v: %t", rule.Name, rule.Reject, attrs.Name, matches) + if matches { + if rule.Reject { + return false + } + anyMatched = true + } + } + return anyMatched || !r.defaultReject +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules/rules.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules/rules.go new file mode 100644 index 0000000000000..78735e4276e89 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules/rules.go @@ -0,0 +1,178 @@ +package rules + +import ( + "github.com/openshift/library-go/pkg/image/imageutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + + "github.com/openshift/api/image/docker10" + imagev1 "github.com/openshift/api/image/v1" + imagepolicy "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" + "github.com/openshift/library-go/pkg/image/reference" +) + +type ImagePolicyAttributes struct { + Resource metav1.GroupResource + Name reference.DockerImageReference + Image *imagev1.Image + ExcludedRules sets.String + IntegratedRegistry bool + LocalRewrite bool +} + +type RegistryMatcher interface { + Matches(name string) bool +} + +type RegistryNameMatcher string + +func (m RegistryNameMatcher) Matches(name string) bool { + if len(m) == 0 { + return false + } + return string(m) == name +} + +type nameSet []string + +func (m nameSet) Matches(name string) bool { 
+ for _, s := range m { + if s == name { + return true + } + } + return false +} + +func NewRegistryMatcher(names []string) RegistryMatcher { + return nameSet(names) +} + +type resourceSet map[metav1.GroupResource]struct{} + +func imageConditionInfo(rule *imagepolicy.ImageCondition) (covers resourceSet, selectors []labels.Selector, err error) { + covers = make(resourceSet) + for _, gr := range rule.OnResources { + covers[gr] = struct{}{} + } + + for i := range rule.MatchImageLabels { + s, err := metav1.LabelSelectorAsSelector(&rule.MatchImageLabels[i]) + if err != nil { + return nil, nil, err + } + selectors = append(selectors, s) + } + + return covers, selectors, nil +} + +func requiresImage(rule *imagepolicy.ImageCondition) bool { + switch { + case len(rule.MatchImageLabels) > 0, + len(rule.MatchImageAnnotations) > 0, + len(rule.MatchDockerImageLabels) > 0: + return true + } + + return false +} + +// matchImageCondition determines the result of an ImageCondition or the provided arguments. +func matchImageCondition(condition *imagepolicy.ImageCondition, integrated RegistryMatcher, attrs *ImagePolicyAttributes) bool { + result := matchImageConditionValues(condition, integrated, attrs) + klog.V(5).Infof("image matches conditions for %q: %t(invert=%t)", condition.Name, result, condition.InvertMatch) + if condition.InvertMatch { + result = !result + } + return result +} + +// matchImageConditionValues handles only the match rules on the condition, returning true if the conditions match. +// Use matchImageCondition to apply invertMatch rules. +func matchImageConditionValues(rule *imagepolicy.ImageCondition, integrated RegistryMatcher, attrs *ImagePolicyAttributes) bool { + if rule.MatchIntegratedRegistry && !(attrs.IntegratedRegistry || integrated.Matches(attrs.Name.Registry)) { + klog.V(5).Infof("image registry %v does not match integrated registry", attrs.Name.Registry) + return false + } + if len(rule.MatchRegistries) > 0 && !hasAnyMatch(attrs.Name.Registry, rule.MatchRegistries) { + klog.V(5).Infof("image registry %v does not match registries from rule: %#v", attrs.Name.Registry, rule.MatchRegistries) + return false + } + + // all subsequent calls require the image + image := attrs.Image + if image == nil { + if rule.SkipOnResolutionFailure { + klog.V(5).Infof("rule does not match because image did not resolve and SkipOnResolutionFailure is true") + // Likely we will never get here (see: https://github.com/openshift/origin/blob/4f709b48f8e52e8c6012bd8b91945f022a437a6a/pkg/image/admission/imagepolicy/rules/accept.go#L99-L103) + // but if we do, treat the condition as not matching since we are supposed to skip this rule on resolution failure. 
+ return false + } + + // if we don't require an image to evaluate our rules, then there's no reason to continue from here + // we already know that we passed our filter + r := requiresImage(rule) + klog.V(5).Infof("image did not resolve, rule requires image metadata for matching: %t", r) + return !r + } + + if len(rule.MatchDockerImageLabels) > 0 { + if err := imageutil.ImageWithMetadata(image); err != nil { + if rule.SkipOnResolutionFailure { + return false + } else { + return true + } + } + dockerImageMetadata, hasMetadata := image.DockerImageMetadata.Object.(*docker10.DockerImage) + if !hasMetadata { + klog.V(5).Infof("image has no labels to match rule labels") + return false + } + + if !matchKeyValue(dockerImageMetadata.Config.Labels, rule.MatchDockerImageLabels) { + klog.V(5).Infof("image labels %#v do not match rule labels %#v", dockerImageMetadata.Config.Labels, rule.MatchDockerImageLabels) + return false + } + } + if !matchKeyValue(image.Annotations, rule.MatchImageAnnotations) { + klog.V(5).Infof("image annotations %#v do not match rule annotations %#v", image.Annotations, rule.MatchImageAnnotations) + return false + } + for _, s := range rule.MatchImageLabelSelectors { + if !s.Matches(labels.Set(image.Labels)) { + klog.V(5).Infof("image label selectors %#v do not match rule label selectors %#v", image.Labels, s) + return false + } + } + + return true +} + +func matchKeyValue(all map[string]string, conditions []imagepolicy.ValueCondition) bool { + for _, condition := range conditions { + switch { + case condition.Set: + if _, ok := all[condition.Key]; !ok { + return false + } + default: + if all[condition.Key] != condition.Value { + return false + } + } + } + return true +} + +func hasAnyMatch(name string, all []string) bool { + for _, s := range all { + if name == s { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/accessor.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/accessor.go new file mode 100644 index 0000000000000..98877c93d691f --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/accessor.go @@ -0,0 +1,165 @@ +package clusterresourcequota + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + kapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilwait "k8s.io/apimachinery/pkg/util/wait" + utilquota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/apiserver/pkg/storage" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/utils/lru" + + quotav1 "github.com/openshift/api/quota/v1" + quotatypedclient "github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1" + quotalister "github.com/openshift/client-go/quota/listers/quota/v1" + "github.com/openshift/library-go/pkg/quota/clusterquotamapping" + quotautil "github.com/openshift/library-go/pkg/quota/quotautil" +) + +type clusterQuotaAccessor struct { + clusterQuotaLister quotalister.ClusterResourceQuotaLister + namespaceLister corev1listers.NamespaceLister + clusterQuotaClient quotatypedclient.ClusterResourceQuotasGetter + + clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper + + // updatedClusterQuotas holds a cache of quotas that we've updated. This is used to pull the "really latest" during back to + // back quota evaluations that touch the same quota doc. 
This only works because we can compare etcd resourceVersions + // for the same resource as integers. Before this change: 22 updates with 12 conflicts. after this change: 15 updates with 0 conflicts + updatedClusterQuotas *lru.Cache +} + +// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects. +func newQuotaAccessor( + clusterQuotaLister quotalister.ClusterResourceQuotaLister, + namespaceLister corev1listers.NamespaceLister, + clusterQuotaClient quotatypedclient.ClusterResourceQuotasGetter, + clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper, +) *clusterQuotaAccessor { + updatedCache := lru.New(100) + return &clusterQuotaAccessor{ + clusterQuotaLister: clusterQuotaLister, + namespaceLister: namespaceLister, + clusterQuotaClient: clusterQuotaClient, + clusterQuotaMapper: clusterQuotaMapper, + updatedClusterQuotas: updatedCache, + } +} + +// UpdateQuotaStatus the newQuota coming in will be incremented from the original. The difference between the original +// and the new is the amount to add to the namespace total, but the total status is the used value itself +func (e *clusterQuotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { + clusterQuota, err := e.clusterQuotaLister.Get(newQuota.Name) + if err != nil { + return err + } + clusterQuota = e.checkCache(clusterQuota) + + // re-assign objectmeta + // make a copy + clusterQuota = clusterQuota.DeepCopy() + clusterQuota.ObjectMeta = newQuota.ObjectMeta + clusterQuota.Namespace = "" + + // determine change in usage + usageDiff := utilquota.Subtract(newQuota.Status.Used, clusterQuota.Status.Total.Used) + + // update aggregate usage + clusterQuota.Status.Total.Used = newQuota.Status.Used + + // update per namespace totals + oldNamespaceTotals, _ := quotautil.GetResourceQuotasStatusByNamespace(clusterQuota.Status.Namespaces, newQuota.Namespace) + namespaceTotalCopy := oldNamespaceTotals.DeepCopy() + newNamespaceTotals := *namespaceTotalCopy + newNamespaceTotals.Used = utilquota.Add(oldNamespaceTotals.Used, usageDiff) + quotautil.InsertResourceQuotasStatus(&clusterQuota.Status.Namespaces, quotav1.ResourceQuotaStatusByNamespace{ + Namespace: newQuota.Namespace, + Status: newNamespaceTotals, + }) + + updatedQuota, err := e.clusterQuotaClient.ClusterResourceQuotas().UpdateStatus(context.TODO(), clusterQuota, metav1.UpdateOptions{}) + if err != nil { + return err + } + + e.updatedClusterQuotas.Add(clusterQuota.Name, updatedQuota) + return nil +} + +var etcdVersioner = storage.APIObjectVersioner{} + +// checkCache compares the passed quota against the value in the look-aside cache and returns the newer +// if the cache is out of date, it deletes the stale entry. 
This only works because
+// etcd resourceVersions are monotonically increasing integers.
+func (e *clusterQuotaAccessor) checkCache(clusterQuota *quotav1.ClusterResourceQuota) *quotav1.ClusterResourceQuota {
+	uncastCachedQuota, ok := e.updatedClusterQuotas.Get(clusterQuota.Name)
+	if !ok {
+		return clusterQuota
+	}
+	cachedQuota := uncastCachedQuota.(*quotav1.ClusterResourceQuota)
+
+	if etcdVersioner.CompareResourceVersion(clusterQuota, cachedQuota) >= 0 {
+		e.updatedClusterQuotas.Remove(clusterQuota.Name)
+		return clusterQuota
+	}
+	return cachedQuota
+}
+
+func (e *clusterQuotaAccessor) GetQuotas(namespaceName string) ([]corev1.ResourceQuota, error) {
+	clusterQuotaNames, err := e.waitForReadyClusterQuotaNames(namespaceName)
+	if err != nil {
+		return nil, err
+	}
+
+	resourceQuotas := []corev1.ResourceQuota{}
+	for _, clusterQuotaName := range clusterQuotaNames {
+		clusterQuota, err := e.clusterQuotaLister.Get(clusterQuotaName)
+		if kapierrors.IsNotFound(err) {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		clusterQuota = e.checkCache(clusterQuota)
+
+		// now convert to a ResourceQuota
+		convertedQuota := corev1.ResourceQuota{}
+		convertedQuota.ObjectMeta = clusterQuota.ObjectMeta
+		convertedQuota.Namespace = namespaceName
+		convertedQuota.Spec = clusterQuota.Spec.Quota
+		convertedQuota.Status = clusterQuota.Status.Total
+		resourceQuotas = append(resourceQuotas, convertedQuota)
+	}
+
+	return resourceQuotas, nil
+}
+
+func (e *clusterQuotaAccessor) waitForReadyClusterQuotaNames(namespaceName string) ([]string, error) {
+	var clusterQuotaNames []string
+	// wait for a valid mapping cache. The overall response can be delayed for up to 8 seconds.
+	err := utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
+		var namespaceSelectionFields clusterquotamapping.SelectionFields
+		clusterQuotaNames, namespaceSelectionFields = e.clusterQuotaMapper.GetClusterQuotasFor(namespaceName)
+		namespace, err := e.namespaceLister.Get(namespaceName)
+		// if we can't find the namespace yet, just wait for the cache to update. Requests to non-existent namespaces
+		// may hang, but those people are doing something wrong and namespacelifecycle should reject them.
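+		// returning false, nil on NotFound keeps the poll running; if the mapping
+		// never settles, PollImmediate gives up after the window above and that
+		// timeout error is handed back to the admission caller.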
+ if kapierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + if equality.Semantic.DeepEqual(namespaceSelectionFields, clusterquotamapping.GetSelectionFields(namespace)) { + return true, nil + } + return false, nil + }) + return clusterQuotaNames, err +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/admission.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/admission.go new file mode 100644 index 0000000000000..8d1a322de49db --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/admission.go @@ -0,0 +1,239 @@ +package clusterresourcequota + +import ( + "context" + "errors" + "fmt" + "io" + "sort" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + utilwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/admission/plugin/resourcequota" + resourcequotaapi "k8s.io/apiserver/pkg/admission/plugin/resourcequota/apis/resourcequota" + quota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/quota/v1/install" + + quotatypedclient "github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + quotalister "github.com/openshift/client-go/quota/listers/quota/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "github.com/openshift/library-go/pkg/quota/clusterquotamapping" +) + +const ( + pluginName = "quota.openshift.io/ClusterResourceQuota" +) + +func Register(plugins *admission.Plugins) { + plugins.Register(pluginName, + func(config io.Reader) (admission.Interface, error) { + return NewClusterResourceQuota() + }) +} + +// clusterQuotaAdmission implements an admission controller that can enforce clusterQuota constraints +type clusterQuotaAdmission struct { + *admission.Handler + + // these are used to create the accessor + clusterQuotaLister quotalister.ClusterResourceQuotaLister + namespaceLister corev1listers.NamespaceLister + clusterQuotaSynced func() bool + namespaceSynced func() bool + clusterQuotaClient quotatypedclient.ClusterResourceQuotasGetter + clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper + + lockFactory LockFactory + + // these are used to create the evaluator + registry quota.Registry + + init sync.Once + evaluator resourcequota.Evaluator +} + +var _ initializer.WantsExternalKubeInformerFactory = &clusterQuotaAdmission{} +var _ admissionrestconfig.WantsRESTClientConfig = &clusterQuotaAdmission{} +var _ WantsClusterQuota = &clusterQuotaAdmission{} +var _ WantsOriginQuotaRegistry = &clusterQuotaAdmission{} +var _ admission.ValidationInterface = &clusterQuotaAdmission{} + +const ( + timeToWaitForCacheSync = 10 * time.Second + numEvaluatorThreads = 10 +) + +// NewClusterResourceQuota configures an admission controller that can enforce clusterQuota constraints +// using the provided registry. 
The registry must have the capability to handle group/kinds that
+// are persisted by the server this admission controller is intercepting.
+func NewClusterResourceQuota() (admission.Interface, error) {
+	return &clusterQuotaAdmission{
+		Handler:     admission.NewHandler(admission.Create, admission.Update),
+		lockFactory: NewDefaultLockFactory(),
+	}, nil
+}
+
+// Validate makes admission decisions while enforcing clusterQuota.
+func (q *clusterQuotaAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) {
+	// ignore all operations that correspond to sub-resource actions
+	if len(a.GetSubresource()) != 0 {
+		return nil
+	}
+
+	// Ignore cluster level resources.
+	// We can't get the namespace for the request because the attributes namespace means "what namespace is this scoped to",
+	// not "is this a cluster scoped resource". This makes a difference for namespaces, which have their attributes namespace
+	// set to their own name. Namespaces are cluster level objects that shouldn't go through this plugin, or it would get
+	// blocked listing the namespace that is just being created.
+	obj := a.GetObject()
+	accessor, err := metav1.Accessor(obj)
+	if err != nil {
+		klog.Warningf("ClusterQuotaAdmission received non object %T: %v", obj, err)
+		return nil
+	}
+	if len(accessor.GetNamespace()) == 0 {
+		return nil
+	}
+
+	if !q.waitForSyncedStore(time.After(timeToWaitForCacheSync)) {
+		return admission.NewForbidden(a, fmt.Errorf("%s: caches not synchronized", pluginName))
+	}
+
+	q.init.Do(func() {
+		clusterQuotaAccessor := newQuotaAccessor(q.clusterQuotaLister, q.namespaceLister, q.clusterQuotaClient, q.clusterQuotaMapper)
+		q.evaluator = resourcequota.NewQuotaEvaluator(clusterQuotaAccessor, ignoredResources, q.registry, q.lockAquisition, &resourcequotaapi.Configuration{}, numEvaluatorThreads, utilwait.NeverStop)
+	})
+
+	return q.evaluator.Evaluate(a)
+}
+
+func (q *clusterQuotaAdmission) lockAquisition(quotas []corev1.ResourceQuota) func() {
+	locks := []sync.Locker{}
+
+	// acquire the locks in alphabetical order because I'm too lazy to think of something clever
+	sort.Sort(ByName(quotas))
+	for _, quota := range quotas {
+		lock := q.lockFactory.GetLock(quota.Name)
+		lock.Lock()
+		locks = append(locks, lock)
+	}
+
+	return func() {
+		for i := len(locks) - 1; i >= 0; i-- {
+			locks[i].Unlock()
+		}
+	}
+}
+
+func (q *clusterQuotaAdmission) waitForSyncedStore(timeout <-chan time.Time) bool {
+	for !q.clusterQuotaSynced() || !q.namespaceSynced() {
+		select {
+		case <-time.After(100 * time.Millisecond):
+		case <-timeout:
+			return q.clusterQuotaSynced() && q.namespaceSynced()
+		}
+	}
+
+	return true
+}
+
+func (q *clusterQuotaAdmission) SetOriginQuotaRegistry(registry quota.Registry) {
+	q.registry = registry
+}
+
+func (q *clusterQuotaAdmission) SetExternalKubeInformerFactory(informers informers.SharedInformerFactory) {
+	q.namespaceLister = informers.Core().V1().Namespaces().Lister()
+	q.namespaceSynced = informers.Core().V1().Namespaces().Informer().HasSynced
+}
+
+func (q *clusterQuotaAdmission) SetRESTClientConfig(restClientConfig rest.Config) {
+	var err error
+
+	// ClusterResourceQuota is served as a CRD resource, so any status update must use JSON
+	jsonClientConfig := rest.CopyConfig(&restClientConfig)
+	jsonClientConfig.ContentConfig.AcceptContentTypes = "application/json"
+	jsonClientConfig.ContentConfig.ContentType = "application/json"
+
+	q.clusterQuotaClient, err = quotatypedclient.NewForConfig(jsonClientConfig)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return
+	}
+}
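+
+// A hedged wiring sketch (not part of this file's API): a composing server
+// registers the plugin and runs initializers so the setters here fire before
+// the first Validate call:
+//
+//	plugins := admission.NewPlugins()
+//	Register(plugins)
+//	// NewInitializer(clusterResourceQuotaInformer, clusterQuotaMapper, quotaRegistry)
+//	// from this package then supplies SetClusterQuota and SetOriginQuotaRegistry.
+//
+// ValidateInitialization (further below) reports any dependency that was never set.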
+ +func (q *clusterQuotaAdmission) SetClusterQuota(clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper, informers quotainformer.ClusterResourceQuotaInformer) { + q.clusterQuotaMapper = clusterQuotaMapper + q.clusterQuotaLister = informers.Lister() + q.clusterQuotaSynced = informers.Informer().HasSynced +} + +func (q *clusterQuotaAdmission) ValidateInitialization() error { + if q.clusterQuotaLister == nil { + return errors.New("missing clusterQuotaLister") + } + if q.namespaceLister == nil { + return errors.New("missing namespaceLister") + } + if q.clusterQuotaClient == nil { + return errors.New("missing clusterQuotaClient") + } + if q.clusterQuotaMapper == nil { + return errors.New("missing clusterQuotaMapper") + } + if q.registry == nil { + return errors.New("missing registry") + } + + return nil +} + +type ByName []corev1.ResourceQuota + +func (v ByName) Len() int { return len(v) } +func (v ByName) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v ByName) Less(i, j int) bool { return v[i].Name < v[j].Name } + +// ignoredResources is the set of resources that clusterquota ignores. It's larger because we have to ignore requests +// that the namespace lifecycle plugin ignores. This is because of the need to have a matching namespace in order to be sure +// that the cache is current enough to have mapped the CRQ to the namespaces. Normal RQ doesn't have that requirement. +var ignoredResources = map[schema.GroupResource]struct{}{} + +func init() { + for k := range install.DefaultIgnoredResources() { + ignoredResources[k] = struct{}{} + } + for k := range accessReviewResources { + ignoredResources[k] = struct{}{} + } + +} + +// accessReviewResources are resources which give a view into permissions in a namespace. Users must be allowed to create these +// resources because returning "not found" errors allows someone to search for the "people I'm going to fire in 2017" namespace. 
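+// For instance, creating a LocalSubjectAccessReview in a namespace the caller
+// cannot otherwise see must succeed rather than return "not found", so that
+// resource (and its peers below) is exempt from quota evaluation.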
+var accessReviewResources = map[schema.GroupResource]bool{ + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, + {Group: "", Resource: "subjectaccessreviews"}: true, + {Group: "", Resource: "localsubjectaccessreviews"}: true, + {Group: "", Resource: "resourceaccessreviews"}: true, + {Group: "", Resource: "localresourceaccessreviews"}: true, + {Group: "", Resource: "selfsubjectrulesreviews"}: true, + {Group: "", Resource: "subjectrulesreviews"}: true, + {Group: "authorization.openshift.io", Resource: "subjectaccessreviews"}: true, + {Group: "authorization.openshift.io", Resource: "localsubjectaccessreviews"}: true, + {Group: "authorization.openshift.io", Resource: "resourceaccessreviews"}: true, + {Group: "authorization.openshift.io", Resource: "localresourceaccessreviews"}: true, + {Group: "authorization.openshift.io", Resource: "selfsubjectrulesreviews"}: true, + {Group: "authorization.openshift.io", Resource: "subjectrulesreviews"}: true, +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/intializers.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/intializers.go new file mode 100644 index 0000000000000..45d35a62afd19 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/intializers.go @@ -0,0 +1,51 @@ +package clusterresourcequota + +import ( + "k8s.io/apiserver/pkg/admission" + quota "k8s.io/apiserver/pkg/quota/v1" + + quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + "github.com/openshift/library-go/pkg/quota/clusterquotamapping" +) + +func NewInitializer( + clusterResourceQuotaInformer quotainformer.ClusterResourceQuotaInformer, + clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper, + quotaRegistry quota.Registry, +) admission.PluginInitializer { + return &localInitializer{ + clusterResourceQuotaInformer: clusterResourceQuotaInformer, + clusterQuotaMapper: clusterQuotaMapper, + quotaRegistry: quotaRegistry, + } +} + +// WantsClusterQuota should be implemented by admission plugins that need to know how to map between +// cluster quota and namespaces and get access to the informer. 
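+// A minimal implementation sketch (myPlugin is a hypothetical admission
+// plugin that also satisfies admission.InitializationValidator):
+//
+//	func (p *myPlugin) SetClusterQuota(m clusterquotamapping.ClusterQuotaMapper, i quotainformer.ClusterResourceQuotaInformer) {
+//		p.mapper, p.lister, p.synced = m, i.Lister(), i.Informer().HasSynced
+//	}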
+type WantsClusterQuota interface {
+	SetClusterQuota(clusterquotamapping.ClusterQuotaMapper, quotainformer.ClusterResourceQuotaInformer)
+	admission.InitializationValidator
+}
+
+// WantsOriginQuotaRegistry should be implemented by admission plugins that need a quota registry.
+type WantsOriginQuotaRegistry interface {
+	SetOriginQuotaRegistry(quota.Registry)
+	admission.InitializationValidator
+}
+
+type localInitializer struct {
+	clusterResourceQuotaInformer quotainformer.ClusterResourceQuotaInformer
+	clusterQuotaMapper           clusterquotamapping.ClusterQuotaMapper
+	quotaRegistry                quota.Registry
+}
+
+// Initialize will check the initialization interfaces implemented by each plugin
+// and provide the appropriate initialization data.
+func (i *localInitializer) Initialize(plugin admission.Interface) {
+	if wants, ok := plugin.(WantsClusterQuota); ok {
+		wants.SetClusterQuota(i.clusterQuotaMapper, i.clusterResourceQuotaInformer)
+	}
+	if wants, ok := plugin.(WantsOriginQuotaRegistry); ok {
+		wants.SetOriginQuotaRegistry(i.quotaRegistry)
+	}
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/lockfactory.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/lockfactory.go
new file mode 100644
index 0000000000000..ed73d0e2d14f3
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota/lockfactory.go
@@ -0,0 +1,40 @@
+package clusterresourcequota
+
+import (
+	"sync"
+)
+
+type LockFactory interface {
+	GetLock(string) sync.Locker
+}
+
+type DefaultLockFactory struct {
+	lock sync.RWMutex
+
+	locks map[string]sync.Locker
+}
+
+func NewDefaultLockFactory() *DefaultLockFactory {
+	return &DefaultLockFactory{locks: map[string]sync.Locker{}}
+}
+
+func (f *DefaultLockFactory) GetLock(key string) sync.Locker {
+	lock, exists := f.getExistingLock(key)
+	if exists {
+		return lock
+	}
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	// re-check under the write lock so two racing callers cannot install
+	// different mutexes for the same key
+	if existing, ok := f.locks[key]; ok {
+		return existing
+	}
+	lock = &sync.Mutex{}
+	f.locks[key] = lock
+	return lock
+}
+
+func (f *DefaultLockFactory) getExistingLock(key string) (sync.Locker, bool) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+
+	lock, exists := f.locks[key]
+	return lock, exists
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/authorization/scope/converter.go b/vendor/github.com/openshift/apiserver-library-go/pkg/authorization/scope/converter.go
new file mode 100644
index 0000000000000..015cb0158f9f2
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/authorization/scope/converter.go
@@ -0,0 +1,408 @@
+package scope
+
+import (
+	"fmt"
+
+	rbacv1 "k8s.io/api/rbac/v1"
+	kapierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
+	rbaclisters "k8s.io/client-go/listers/rbac/v1"
+	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
+	authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+
+	scopemetadata "github.com/openshift/library-go/pkg/authorization/scopemetadata"
+)
+
+const (
+	scopesAllNamespaces = "*"
+
+	legacyGroupName                 = ""
+	coreGroupName                   = ""
+	kubeAuthorizationGroupName      = "authorization.k8s.io"
+	openshiftAuthorizationGroupName = "authorization.openshift.io"
+	imageGroupName                  = "image.openshift.io"
+	networkGroupName                = "network.openshift.io"
+	oauthGroupName                  = "oauth.openshift.io"
+	projectGroupName                = "project.openshift.io"
+	userGroupName                   = "user.openshift.io"
+)
+
+// scopeDiscoveryRule is a rule that allows a client to discover the API resources available on this server
+var scopeDiscoveryRule = rbacv1.PolicyRule{
+	Verbs: []string{"get"},
+	NonResourceURLs: []string{
+		// Server version checking
+		"/version", "/version/*",
+
+		// API discovery/negotiation
+		"/api", "/api/*",
+		"/apis", "/apis/*",
+		"/oapi", "/oapi/*",
+		"/openapi/v2",
+		"/swaggerapi", "/swaggerapi/*", "/swagger.json", "/swagger-2.0.0.pb-v1",
+		"/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
+		"/.well-known", "/.well-known/*",
+
+		// we intentionally allow all to here
+		"/",
+	},
+}
+
+// ScopesToRules takes the scopes and returns the rules back. We ALWAYS add the discovery rules and it is possible to get some rules and
+// an error since errors aren't fatal to evaluation.
+func ScopesToRules(scopes []string, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) {
+	rules := append([]rbacv1.PolicyRule{}, scopeDiscoveryRule)
+
+	errors := []error{}
+	for _, scope := range scopes {
+		found := false
+
+		for _, evaluator := range ScopeEvaluators {
+			if evaluator.Handles(scope) {
+				found = true
+				currRules, err := evaluator.ResolveRules(scope, namespace, clusterRoleGetter)
+				if err != nil {
+					errors = append(errors, err)
+					continue
+				}
+
+				rules = append(rules, currRules...)
+			}
+		}
+
+		if !found {
+			errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
+		}
+	}
+
+	return rules, kutilerrors.NewAggregate(errors)
+}
+
+// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
+// This exists only to support efficient list/watch of projects (ACLed namespaces).
+func ScopesToVisibleNamespaces(scopes []string, clusterRoleGetter rbaclisters.ClusterRoleLister, ignoreUnhandledScopes bool) (sets.String, error) {
+	if len(scopes) == 0 {
+		return sets.NewString("*"), nil
+	}
+
+	visibleNamespaces := sets.String{}
+
+	errors := []error{}
+	for _, scope := range scopes {
+		found := false
+
+		for _, evaluator := range ScopeEvaluators {
+			if evaluator.Handles(scope) {
+				found = true
+				allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterRoleGetter)
+				if err != nil {
+					errors = append(errors, err)
+					continue
+				}
+
+				visibleNamespaces.Insert(allowedNamespaces...)
+ break + } + } + + if !found && !ignoreUnhandledScopes { + errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope)) + } + } + + return visibleNamespaces, kutilerrors.NewAggregate(errors) +} + +const ( + UserIndicator = "user:" + ClusterRoleIndicator = "role:" +) + +// ScopeEvaluator takes a scope and returns the rules that express it +type ScopeEvaluator interface { + // Handles returns true if this evaluator can evaluate this scope + Handles(scope string) bool + // Validate returns an error if the scope is malformed + Validate(scope string) error + // Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed + Describe(scope string) (description string, warning string, err error) + // ResolveRules returns the policy rules that this scope allows + ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) + ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error) +} + +// ScopeEvaluators is the list of scope evaluators; each evaluator handles scopes with a particular prefix +var ScopeEvaluators = []ScopeEvaluator{ + userEvaluator{}, + clusterRoleEvaluator{}, +} + +// scopes are in the format +// <indicator><indicator choice> +// we have the following formats: +// user:<scope name> +// role:<clusterrole name>:<namespace to allow the cluster role, * means all> +// TODO +// cluster:<comma-delimited verbs>:<comma-delimited resources> +// namespace:<namespace name>:<comma-delimited verbs>:<comma-delimited resources> + +const ( + UserInfo = UserIndicator + "info" + UserAccessCheck = UserIndicator + "check-access" + + // UserListScopedProjects gives explicit permission to see the projects that this token can see. + UserListScopedProjects = UserIndicator + "list-scoped-projects" + + // UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems + // unrelated to openshift and to display projects for selection in a secondary UI. + UserListAllProjects = UserIndicator + "list-projects" + + // UserFull includes all permissions of the user + UserFull = UserIndicator + "full" +) + +var defaultSupportedScopesMap = map[string]string{ + UserInfo: "Read-only access to your user information (including username, identities, and group membership)", + UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`, + UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`, + UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`, + UserFull: `Full read/write access with all of your permissions`, +} + +func DefaultSupportedScopes() []string { + return sets.StringKeySet(defaultSupportedScopesMap).List() +} + +func DescribeScopes(scopes []string) map[string]string { + ret := map[string]string{} + for _, s := range scopes { + val, ok := defaultSupportedScopesMap[s] + if ok { + ret[s] = val + } else { + ret[s] = "" + } + } + return ret +} + +// user:<scope name> +type userEvaluator struct { + scopemetadata.UserEvaluator +} + +func (userEvaluator) ResolveRules(scope, namespace string, _ rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) { + switch scope { + case UserInfo: + return []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get"). + Groups(userGroupName, legacyGroupName). + Resources("users"). + Names("~"). + RuleOrDie(), + }, nil + case UserAccessCheck: + return []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create"). + Groups(kubeAuthorizationGroupName). + Resources("selfsubjectaccessreviews").
+ RuleOrDie(), + rbacv1helpers.NewRule("create"). + Groups(openshiftAuthorizationGroupName, legacyGroupName). + Resources("selfsubjectrulesreviews"). + RuleOrDie(), + }, nil + case UserListScopedProjects: + return []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch"). + Groups(projectGroupName, legacyGroupName). + Resources("projects"). + RuleOrDie(), + }, nil + case UserListAllProjects: + return []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch"). + Groups(projectGroupName, legacyGroupName). + Resources("projects"). + RuleOrDie(), + rbacv1helpers.NewRule("get"). + Groups(coreGroupName). + Resources("namespaces"). + RuleOrDie(), + }, nil + case UserFull: + return []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(rbacv1.VerbAll). + Groups(rbacv1.APIGroupAll). + Resources(rbacv1.ResourceAll). + RuleOrDie(), + rbacv1helpers.NewRule(rbacv1.VerbAll). + URLs(rbacv1.NonResourceAll). + RuleOrDie(), + }, nil + default: + return nil, fmt.Errorf("unrecognized scope: %v", scope) + } +} + +func (userEvaluator) ResolveGettableNamespaces(scope string, _ rbaclisters.ClusterRoleLister) ([]string, error) { + switch scope { + case UserFull, UserListAllProjects: + return []string{"*"}, nil + default: + return []string{}, nil + } +} + +// escalatingScopeResources are resources that are considered escalating for scope evaluation +var escalatingScopeResources = []schema.GroupResource{ + {Group: coreGroupName, Resource: "secrets"}, + {Group: imageGroupName, Resource: "imagestreams/secrets"}, + {Group: oauthGroupName, Resource: "oauthauthorizetokens"}, + {Group: oauthGroupName, Resource: "oauthaccesstokens"}, + {Group: openshiftAuthorizationGroupName, Resource: "roles"}, + {Group: openshiftAuthorizationGroupName, Resource: "rolebindings"}, + {Group: openshiftAuthorizationGroupName, Resource: "clusterroles"}, + {Group: openshiftAuthorizationGroupName, Resource: "clusterrolebindings"}, + // used in Service admission to create a service with external IP outside the allowed range + {Group: networkGroupName, Resource: "service/externalips"}, + + {Group: legacyGroupName, Resource: "imagestreams/secrets"}, + {Group: legacyGroupName, Resource: "oauthauthorizetokens"}, + {Group: legacyGroupName, Resource: "oauthaccesstokens"}, + {Group: legacyGroupName, Resource: "roles"}, + {Group: legacyGroupName, Resource: "rolebindings"}, + {Group: legacyGroupName, Resource: "clusterroles"}, + {Group: legacyGroupName, Resource: "clusterrolebindings"}, +} + +// role:<clusterrole name>:<namespace to allow the cluster role, * means all> +type clusterRoleEvaluator struct { + scopemetadata.ClusterRoleEvaluator +} + +var clusterRoleEvaluatorInstance = clusterRoleEvaluator{} + +func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) { + _, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return nil, err + } + + // if the scope limit on the clusterrole doesn't match, then don't add any rules, but it's not an error + if !(scopeNamespace == scopesAllNamespaces || scopeNamespace == namespace) { + return []rbacv1.PolicyRule{}, nil + } + + return e.resolveRules(scope, clusterRoleGetter) +} + +func has(set []string, value string) bool { + for _, element := range set { + if value == element { + return true + } + } + return false +} + +// resolveRules doesn't enforce namespace checks +func (e clusterRoleEvaluator) resolveRules(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]rbacv1.PolicyRule, error) { + roleName, _, escalating, err :=
scopemetadata.ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return nil, err + } + + role, err := clusterRoleGetter.Get(roleName) + if err != nil { + if kapierrors.IsNotFound(err) { + return []rbacv1.PolicyRule{}, nil + } + return nil, err + } + + rules := []rbacv1.PolicyRule{} + for _, rule := range role.Rules { + if escalating { + rules = append(rules, rule) + continue + } + + // rules with unbounded access shouldn't be allowed in scopes. + if has(rule.Verbs, rbacv1.VerbAll) || + has(rule.Resources, rbacv1.ResourceAll) || + has(rule.APIGroups, rbacv1.APIGroupAll) { + continue + } + // rules that allow escalating resource access should be cleaned. + safeRule := removeEscalatingResources(rule) + rules = append(rules, safeRule) + } + + return rules, nil +} + +func (e clusterRoleEvaluator) ResolveGettableNamespaces(scope string, clusterRoleGetter rbaclisters.ClusterRoleLister) ([]string, error) { + _, scopeNamespace, _, err := scopemetadata.ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return nil, err + } + rules, err := e.resolveRules(scope, clusterRoleGetter) + if err != nil { + return nil, err + } + + attributes := kauthorizer.AttributesRecord{ + APIGroup: coreGroupName, + Verb: "get", + Resource: "namespaces", + ResourceRequest: true, + } + + if authorizerrbac.RulesAllow(attributes, rules...) { + return []string{scopeNamespace}, nil + } + + return []string{}, nil +} + +func remove(array []string, item string) []string { + newar := array[:0] + for _, element := range array { + if element != item { + newar = append(newar, element) + } + } + return newar +} + +// removeEscalatingResources inspects a PolicyRule and removes any references to escalating resources. +// It has coarse logic for now. It is possible to rewrite one rule into many for the finest grain control +// but removing the entire matching resource regardless of verb or secondary group is cheaper, easier, and errs on the side of removing +// too much, not too little +func removeEscalatingResources(in rbacv1.PolicyRule) rbacv1.PolicyRule { + var ruleCopy *rbacv1.PolicyRule + + for _, resource := range escalatingScopeResources { + if !(has(in.APIGroups, resource.Group) && has(in.Resources, resource.Resource)) { + continue + } + + if ruleCopy == nil { + // we're using a cache of an object that uses pointers to data.
I'm pretty sure we need to do a copy to avoid + // muddying the cache + ruleCopy = in.DeepCopy() + } + + ruleCopy.Resources = remove(ruleCopy.Resources, resource.Resource) + } + + if ruleCopy != nil { + return *ruleCopy + } + + return in +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/configflags/audit.go b/vendor/github.com/openshift/apiserver-library-go/pkg/configflags/audit.go new file mode 100644 index 0000000000000..f7d378daf145f --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/configflags/audit.go @@ -0,0 +1,54 @@ +package configflags + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + configv1 "github.com/openshift/api/config/v1" +) + +const defaultAuditPolicyFilePath = "openshift.local.audit/policy.yaml" + +func AuditFlags(c *configv1.AuditConfig, args map[string][]string) map[string][]string { + if !c.Enabled { + return args + } + + auditPolicyFilePath := c.PolicyFile + if len(c.PolicyConfiguration.Raw) > 0 && string(c.PolicyConfiguration.Raw) != "null" { + if len(auditPolicyFilePath) == 0 { + auditPolicyFilePath = defaultAuditPolicyFilePath + } + if err := os.MkdirAll(filepath.Dir(auditPolicyFilePath), 0755); err != nil { + utilruntime.HandleError(err) + } + if err := ioutil.WriteFile(auditPolicyFilePath, c.PolicyConfiguration.Raw, 0644); err != nil { + utilruntime.HandleError(err) + } + } + + SetIfUnset(args, "audit-log-maxbackup", strconv.Itoa(int(c.MaximumRetainedFiles))) + SetIfUnset(args, "audit-log-maxsize", strconv.Itoa(int(c.MaximumFileSizeMegabytes))) + SetIfUnset(args, "audit-log-maxage", strconv.Itoa(int(c.MaximumFileRetentionDays))) + auditFilePath := c.AuditFilePath + if len(auditFilePath) == 0 { + auditFilePath = "-" + } + SetIfUnset(args, "audit-log-path", auditFilePath) + if len(auditPolicyFilePath) > 0 { + SetIfUnset(args, "audit-policy-file", auditPolicyFilePath) + } + if len(c.LogFormat) > 0 { + SetIfUnset(args, "audit-log-format", string(c.LogFormat)) + } + if len(c.WebHookMode) > 0 { + SetIfUnset(args, "audit-webhook-mode", string(c.WebHookMode)) + } + SetIfUnset(args, "audit-webhook-config-file", string(c.WebHookKubeConfig)) + + return args +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/configflags/helpers.go b/vendor/github.com/openshift/apiserver-library-go/pkg/configflags/helpers.go new file mode 100644 index 0000000000000..12870a4b5916b --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/configflags/helpers.go @@ -0,0 +1,43 @@ +package configflags + +import ( + "fmt" + "sort" + "strings" +) + +// ArgsWithPrefix filters arguments by prefix and collects values. 
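+// +// Illustrative sketch (hypothetical values, not part of the upstream source): +// +//	args := map[string][]string{"audit-log-path": {"-"}, "v": {"2"}} +//	auditArgs := ArgsWithPrefix(args, "audit-") // keeps only the "audit-log-path" entry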
+func ArgsWithPrefix(args map[string][]string, prefix string) map[string][]string { + filtered := map[string][]string{} + for key, slice := range args { + if !strings.HasPrefix(key, prefix) { + continue + } + for _, val := range slice { + filtered[key] = append(filtered[key], val) + } + } + return filtered +} + +func SetIfUnset(cmdLineArgs map[string][]string, key string, value ...string) { + if _, ok := cmdLineArgs[key]; !ok { + cmdLineArgs[key] = value + } +} + +func ToFlagSlice(args map[string][]string) []string { + var keys []string + for key := range args { + keys = append(keys, key) + } + sort.Strings(keys) + + var flags []string + for _, key := range keys { + for _, token := range args[key] { + flags = append(flags, fmt.Sprintf("--%s=%v", key, token)) + } + } + return flags +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/labelselector/labelselector.go b/vendor/github.com/openshift/apiserver-library-go/pkg/labelselector/labelselector.go new file mode 100644 index 0000000000000..c0cc827100304 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/labelselector/labelselector.go @@ -0,0 +1,360 @@ +// labelselector is a trimmed-down version of k8s/pkg/labels/selector.go +// It only accepts exact label matches +// Example: "k1=v1, k2 = v2" +package labelselector + +import ( + "fmt" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// constant definitions for lexer tokens +type Token int + +const ( + ErrorToken Token = iota + EndOfStringToken + CommaToken + EqualsToken + IdentifierToken // to represent keys and values +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ",": CommaToken, + "=": EqualsToken, +} + +// ScannedItem is an item produced by the lexer. It contains the Token and the literal. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the byte is a space, tab, carriage return, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detects whether the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', ',': + return true + } + return false +} + +// Lexer is the lexer for label selectors. +// It contains the information necessary to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read returns the character currently lexed, +// increments the position and guards against reading past the end of the buffer +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIdOrKeyword scans the string to recognize a literal token or an identifier.
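+// +// For example (illustrative, not part of the upstream source): scanning "env=prod" +// from the start yields (IdentifierToken, "env") and stops just before the '=' operator.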
+func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans a string starting with a special symbol. +// Special symbols identify non-literal operators: "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters, +// returning the first non-blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and literal; +// the literal is meaningful only for the IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIdOrKeyword() + } +} + +// Parser is the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// lookahead returns the current token and literal without advancing the current position +func (p *Parser) lookahead() (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + return tok, lit +} + +// consume returns the current token and literal and increments the position +func (p *Parser) consume() (Token, string) { + p.position++ + if p.position > len(p.scannedItems) { + return EndOfStringToken, "" + } + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + return tok, lit +} + +// scan runs through the input string and stores the ScannedItems in an array +// so the Parser can look ahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs a recursive descent algorithm +// on the input string. It returns the parsed map[key]value.
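+// +// For example (illustrative, not part of the upstream source): parsing +// "tier = frontend, env=prod" yields map[string]string{"tier": "frontend", "env": "prod"}.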
+func (p *Parser) parse() (map[string]string, error) { + p.scan() // init scannedItems + + labelsMap := map[string]string{} + for { + tok, lit := p.lookahead() + switch tok { + case IdentifierToken: + key, value, err := p.parseLabel() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + labelsMap[key] = value + t, l := p.consume() + switch t { + case EndOfStringToken: + return labelsMap, nil + case CommaToken: + t2, l2 := p.lookahead() + if t2 != IdentifierToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return labelsMap, nil + default: + return nil, fmt.Errorf("found '%s', expected: identifier or 'end of string'", lit) + } + } +} + +func (p *Parser) parseLabel() (string, string, error) { + key, err := p.parseKey() + if err != nil { + return "", "", err + } + op, err := p.parseOperator() + if err != nil { + return "", "", err + } + if op != "=" { + return "", "", fmt.Errorf("invalid operator: %s, expected: '='", op) + } + value, err := p.parseExactValue() + if err != nil { + return "", "", err + } + return key, value, nil +} + +// parseKey parses a key literal. +func (p *Parser) parseKey() (string, error) { + tok, literal := p.consume() + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", err + } + if err := validateLabelKey(literal); err != nil { + return "", err + } + return literal, nil +} + +// parseOperator parses and returns the operator +func (p *Parser) parseOperator() (op string, err error) { + tok, lit := p.consume() + switch tok { + case EqualsToken: + op = "=" + default: + return "", fmt.Errorf("found '%s', expected: '='", lit) + } + return op, nil +} + +// parseExactValue parses the only value for exact match style +func (p *Parser) parseExactValue() (string, error) { + if tok, _ := p.lookahead(); tok == EndOfStringToken || tok == CommaToken { + return "", nil + } + tok, lit := p.consume() + if tok != IdentifierToken { + return "", fmt.Errorf("found '%s', expected: identifier", lit) + } + if err := validateLabelValue(lit); err != nil { + return "", err + } + return lit, nil +} + +// Parse takes a string representing a selector and returns +// map[key]value, or an error. +// The input will cause an error if it does not follow this form: +// +// <selector> ::= [ <requirement> | <requirement> "," <selector> ] +// <requirement> ::= KEY "=" VALUE +// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL +// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t', '\r', '\n') +func Parse(selector string) (map[string]string, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + labels, err := p.parse() + if err != nil { + return map[string]string{}, err + } + return labels, nil +} + +// Conflicts takes 2 maps and +// returns true if there is a key match between the maps but the values don't match; +// it returns false otherwise +func Conflicts(labels1, labels2 map[string]string) bool { + for k, v := range labels1 { + if val, match := labels2[k]; match { + if val != v { + return true + } + } + } + return false +} + +// Merge combines the given maps. +// Note: it does not check for any conflicts between the maps +func Merge(labels1, labels2 map[string]string) map[string]string { + mergedMap := map[string]string{} + + for k, v := range labels1 { + mergedMap[k] = v + } + for k, v := range labels2 { + mergedMap[k] = v + } + return mergedMap +} + +// Equals returns true if the given maps are equal +func Equals(labels1, labels2 map[string]string) bool { + if len(labels1) != len(labels2) { + return false + } + + for k, v := range labels1 { + value, ok := labels2[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +const qualifiedNameErrorMsg string = "must match format [ DNS 1123 subdomain / ] DNS 1123 label" + +func validateLabelKey(k string) error { + if len(kvalidation.IsQualifiedName(k)) != 0 { + return field.Invalid(field.NewPath("label key"), k, qualifiedNameErrorMsg) + } + return nil +} + +func validateLabelValue(v string) error { + if len(kvalidation.IsValidLabelValue(v)) != 0 { + return field.Invalid(field.NewPath("label value"), v, qualifiedNameErrorMsg) + } + return nil +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities/mustrunas.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities/mustrunas.go new file mode 100644 index 0000000000000..543911941073e --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities/mustrunas.go @@ -0,0 +1,149 @@ +package capabilities + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" + + securityv1 "github.com/openshift/api/security/v1" +) + +// defaultCapabilities implements the CapabilitiesSecurityContextConstraintsStrategy interface +type defaultCapabilities struct { + defaultAddCapabilities []corev1.Capability + requiredDropCapabilities []corev1.Capability + allowedCaps []corev1.Capability +} + +var _ CapabilitiesSecurityContextConstraintsStrategy = &defaultCapabilities{} + +// NewDefaultCapabilities creates a new defaultCapabilities strategy that will provide defaults and validation +// based on the configured initial caps and allowed caps. +func NewDefaultCapabilities(defaultAddCapabilities, requiredDropCapabilities, allowedCaps []corev1.Capability) (CapabilitiesSecurityContextConstraintsStrategy, error) { + return &defaultCapabilities{ + defaultAddCapabilities: defaultAddCapabilities, + requiredDropCapabilities: requiredDropCapabilities, + allowedCaps: allowedCaps, + }, nil +} + +// Generate creates the capabilities based on policy rules. Generate will produce the following: +// 1. a capabilities.Add set containing all the required adds (unless the +// container specifically is dropping the cap) and container requested adds +// 2.
a capabilities.Drop set containing all the required drops and container requested drops +// +// Returns the original container capabilities if no changes are required. +func (s *defaultCapabilities) Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) { + defaultAdd := makeCapSet(s.defaultAddCapabilities) + requiredDrop := makeCapSet(s.requiredDropCapabilities) + containerAdd := sets.NewString() + containerDrop := sets.NewString() + + var containerCapabilities *api.Capabilities + if container.SecurityContext != nil && container.SecurityContext.Capabilities != nil { + containerCapabilities = container.SecurityContext.Capabilities + containerAdd = makeCapSetInternal(container.SecurityContext.Capabilities.Add) + containerDrop = makeCapSetInternal(container.SecurityContext.Capabilities.Drop) + } + + // remove any default adds that the container is specifically dropping + defaultAdd = defaultAdd.Difference(containerDrop) + + combinedAdd := defaultAdd.Union(containerAdd) + combinedDrop := requiredDrop.Union(containerDrop) + + // no changes? return the original capabilities + if (len(combinedAdd) == len(containerAdd)) && (len(combinedDrop) == len(containerDrop)) { + return containerCapabilities, nil + } + + return &api.Capabilities{ + Add: capabilityFromStringSlice(combinedAdd.List()), + Drop: capabilityFromStringSlice(combinedDrop.List()), + }, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *defaultCapabilities) Validate(fldPath *field.Path, pod *api.Pod, container *api.Container, capabilities *api.Capabilities) field.ErrorList { + allErrs := field.ErrorList{} + + if capabilities == nil { + // if container.SC.Caps is nil then nothing was defaulted by the strategy or requested by the pod author + // if there are no required caps on the strategy and nothing is requested on the pod + // then we can safely return here without further validation. + if len(s.defaultAddCapabilities) == 0 && len(s.requiredDropCapabilities) == 0 { + return allErrs + } + + // container has no requested caps but we have required caps. We should have something in + // at least the drops on the container. + allErrs = append(allErrs, field.Invalid(fldPath.Child("capabilities"), capabilities, + "required capabilities are not set on the securityContext")) + return allErrs + } + + allowedAdd := makeCapSet(s.allowedCaps) + allowAllCaps := allowedAdd.Has(string(securityv1.AllowAllCapabilities)) + if allowAllCaps { + // skip validation against allowed/defaultAdd/requiredDrop because all capabilities are allowed by a wildcard + return allErrs + } + + // validate that anything being added is in the default or allowed sets + defaultAdd := makeCapSet(s.defaultAddCapabilities) + + for _, c := range capabilities.Add { + sCap := string(c) + if !defaultAdd.Has(sCap) && !allowedAdd.Has(sCap) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("capabilities", "add"), sCap, "capability may not be added")) + } + } + + // validate that anything that is required to be dropped is in the drop set + containerDrops := makeCapSetInternal(capabilities.Drop) + + for _, requiredDrop := range s.requiredDropCapabilities { + sDrop := string(requiredDrop) + if !containerDrops.Has(sDrop) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("capabilities", "drop"), capabilities.Drop, + fmt.Sprintf("%s is required to be dropped but was not found", sDrop))) + } + } + + return allErrs +} + +// capabilityFromStringSlice creates a capability slice from a string slice.
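+// +// For example (illustrative, not part of the upstream source): []string{"KILL", "MKNOD"} +// becomes []api.Capability{"KILL", "MKNOD"}; an empty input yields nil rather than an empty slice.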
+func capabilityFromStringSlice(slice []string) []api.Capability { + if len(slice) == 0 { + return nil + } + caps := []api.Capability{} + for _, c := range slice { + caps = append(caps, api.Capability(c)) + } + return caps +} + +// makeCapSetInternal makes a string set from internal API capabilities to help +// with comparisons. +func makeCapSetInternal(caps []api.Capability) sets.String { + s := sets.NewString() + for _, c := range caps { + s.Insert(string(c)) + } + return s +} + +// makeCapSet makes a string set from corev1 capabilities to help +// with comparisons. +func makeCapSet(caps []corev1.Capability) sets.String { + s := sets.NewString() + for _, c := range caps { + s.Insert(string(c)) + } + return s +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities/types.go new file mode 100644 index 0000000000000..2c030d3947bbf --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities/types.go @@ -0,0 +1,14 @@ +package capabilities + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// CapabilitiesSecurityContextConstraintsStrategy defines the interface for all cap constraint strategies. +type CapabilitiesSecurityContextConstraintsStrategy interface { + // Generate creates the capabilities based on policy rules. + Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) + // Validate ensures that the specified values fall within the range of the strategy. + Validate(fldPath *field.Path, pod *api.Pod, container *api.Container, capabilities *api.Capabilities) field.ErrorList +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/mustrunas.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/mustrunas.go new file mode 100644 index 0000000000000..14d3f06ee3b04 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/mustrunas.go @@ -0,0 +1,76 @@ +package group + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" + + securityv1 "github.com/openshift/api/security/v1" +) + +// mustRunAs implements the GroupSecurityContextConstraintsStrategy interface +type mustRunAs struct { + ranges []securityv1.IDRange + field string +} + +var _ GroupSecurityContextConstraintsStrategy = &mustRunAs{} + +// NewMustRunAs provides a new MustRunAs strategy based on ranges. +func NewMustRunAs(ranges []securityv1.IDRange, field string) (GroupSecurityContextConstraintsStrategy, error) { + if len(ranges) == 0 { + return nil, fmt.Errorf("ranges must be supplied for MustRunAs") + } + return &mustRunAs{ + ranges: ranges, + field: field, + }, nil +} + +// Generate creates the group based on policy rules. By default this returns the first group of the +// first range (min val). +func (s *mustRunAs) Generate(_ *api.Pod) ([]int64, error) { + return []int64{s.ranges[0].Min}, nil +} + +// GenerateSingle generates a single value to be applied. This is used for FSGroup. This strategy will return +// the first group of the first range (min val).
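+// +// For example (illustrative, not part of the upstream source): with ranges +// [{Min: 1000, Max: 2000}, {Min: 5000, Max: 5010}], Generate returns [1000] and +// GenerateSingle returns a pointer to 1000.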
+func (s *mustRunAs) GenerateSingle(_ *api.Pod) (*int64, error) { + single := new(int64) + *single = s.ranges[0].Min + return single, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +// Groups are passed in here to allow this strategy to support multiple group fields (fsgroup and +// supplemental groups). +func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, groups []int64) field.ErrorList { + allErrs := field.ErrorList{} + + if len(groups) == 0 && len(s.ranges) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, "unable to validate empty groups against required ranges")) + } + + for _, group := range groups { + if !s.isGroupValid(group) { + detail := fmt.Sprintf("%d is not an allowed group", group) + allErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, detail)) + } + } + + return allErrs +} + +func (s *mustRunAs) isGroupValid(group int64) bool { + for _, rng := range s.ranges { + if fallsInRange(group, rng) { + return true + } + } + return false +} + +func fallsInRange(group int64, rng securityv1.IDRange) bool { + return group >= rng.Min && group <= rng.Max +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/runasany.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/runasany.go new file mode 100644 index 0000000000000..01b300e4c0f6c --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/runasany.go @@ -0,0 +1,32 @@ +package group + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// runAsAny implements the GroupSecurityContextConstraintsStrategy interface +type runAsAny struct { +} + +var _ GroupSecurityContextConstraintsStrategy = &runAsAny{} + +// NewRunAsAny provides a new RunAsAny strategy. +func NewRunAsAny() (GroupSecurityContextConstraintsStrategy, error) { + return &runAsAny{}, nil +} + +// Generate creates the group based on policy rules. This strategy returns an empty slice. +func (s *runAsAny) Generate(_ *api.Pod) ([]int64, error) { + return nil, nil +} + +// GenerateSingle generates a single value to be applied. This is used for FSGroup. This strategy returns nil. +func (s *runAsAny) GenerateSingle(_ *api.Pod) (*int64, error) { + return nil, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *runAsAny) Validate(fldPath *field.Path, _ *api.Pod, groups []int64) field.ErrorList { + return field.ErrorList{} +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/types.go new file mode 100644 index 0000000000000..752195ab957f9 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group/types.go @@ -0,0 +1,19 @@ +package group + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// GroupSecurityContextConstraintsStrategy defines the interface for all group constraint strategies. +type GroupSecurityContextConstraintsStrategy interface { + // Generate creates the group based on policy rules. The underlying implementation can + // decide whether it will return a full range of values or a subset of values from the + // configured ranges.
+ Generate(pod *api.Pod) ([]int64, error) + // GenerateSingle generates a single value to be applied. The underlying implementation decides which + // value to return if configured with multiple ranges. This is used for FSGroup. + GenerateSingle(pod *api.Pod) (*int64, error) + // Validate ensures that the specified values fall within the range of the strategy. + Validate(fldPath *field.Path, pod *api.Pod, groups []int64) field.ErrorList +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go new file mode 100644 index 0000000000000..701d978f7ca45 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go @@ -0,0 +1,604 @@ +package sccadmission + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + "time" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + kutilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + coreapi "k8s.io/kubernetes/pkg/apis/core" + kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" + rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" + + securityv1 "github.com/openshift/api/security/v1" + securityv1informer "github.com/openshift/client-go/security/informers/externalversions/security/v1" + securityv1listers "github.com/openshift/client-go/security/listers/security/v1" + + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching" + sccsort "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort" +) + +const PluginName = "security.openshift.io/SecurityContextConstraint" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + func(config io.Reader) (admission.Interface, error) { + return NewConstraint(), nil + }) +} + +type constraint struct { + *admission.Handler + sccLister securityv1listers.SecurityContextConstraintsLister + namespaceLister corev1listers.NamespaceLister + listersSynced []cache.InformerSynced + authorizer authorizer.Authorizer +} + +var ( + _ = initializer.WantsAuthorizer(&constraint{}) + _ = WantsSecurityInformer(&constraint{}) + _ = initializer.WantsExternalKubeInformerFactory(&constraint{}) + _ = admission.ValidationInterface(&constraint{}) + _ = admission.MutationInterface(&constraint{}) +) + +// NewConstraint creates a new SCC constraint admission plugin. +func NewConstraint() *constraint { + return &constraint{ + Handler: admission.NewHandler(admission.Create, admission.Update), + } +} + +// Admit determines if the pod should be admitted based on the requested security context +// and the available SCCs. +// +// 1. Find SCCs for the user. +// 2. Find SCCs for the SA. If there is an error retrieving SA SCCs it is not fatal. +// 3. Remove duplicates between the user/SA SCCs. +// 4. Create the providers, including setting pre-allocated values if necessary. +// 5.
Try to generate and validate an SCC with providers. If we find one then admit the pod +// with the validated SCC. If we don't find any, reject the pod and give all errors from the +// failed attempts. +// +// On updates, the BeforeUpdate of the pod strategy only zeroes out the status. That means that +// any change that claims the pod is no longer privileged will be removed. That should hold until +// we get a true old/new set of objects in. +func (c *constraint) Admit(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if ignore, err := shouldIgnore(a); err != nil { + return err + } else if ignore { + return nil + } + pod := a.GetObject().(*coreapi.Pod) + + // deny changes to required SCC annotation during updates + if a.GetOperation() == admission.Update { + oldPod := a.GetOldObject().(*coreapi.Pod) + + if pod.ObjectMeta.Annotations[securityv1.RequiredSCCAnnotation] != oldPod.ObjectMeta.Annotations[securityv1.RequiredSCCAnnotation] { + return admission.NewForbidden(a, fmt.Errorf("invalid change of required security context constraint annotation: %v", securityv1.RequiredSCCAnnotation)) + } + } + + // TODO(liggitt): allow spec mutation during initializing updates? + specMutationAllowed := a.GetOperation() == admission.Create + ephemeralContainersMutationAllowed := specMutationAllowed || (a.GetOperation() == admission.Update && a.GetSubresource() == "ephemeralcontainers") + + allowedPod, sccName, validationErrs, err := c.computeSecurityContext(ctx, a, pod, specMutationAllowed, ephemeralContainersMutationAllowed, pod.ObjectMeta.Annotations[securityv1.RequiredSCCAnnotation], "") + if err != nil { + return admission.NewForbidden(a, err) + } + + if allowedPod != nil { + *pod = *allowedPod + // annotate and accept the pod + klog.V(4).Infof("pod %s (generate: %s) validated against provider %s", pod.Name, pod.GenerateName, sccName) + if pod.ObjectMeta.Annotations == nil { + pod.ObjectMeta.Annotations = map[string]string{} + } + pod.ObjectMeta.Annotations[securityv1.ValidatedSCCAnnotation] = sccName + return nil + } + + // we didn't validate against any security context constraint provider, reject the pod and give the errors for each attempt + klog.V(4).Infof("unable to validate pod %s (generate: %s) against any security context constraint: %v", pod.Name, pod.GenerateName, validationErrs.ToAggregate()) + return admission.NewForbidden(a, fmt.Errorf("unable to validate against any security context constraint: %v", validationErrs.ToAggregate())) +} + +func (c *constraint) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if ignore, err := shouldIgnore(a); err != nil { + return err + } else if ignore { + return nil + } + pod := a.GetObject().(*coreapi.Pod) + + // this one is required + requiredSCCName := pod.ObjectMeta.Annotations[securityv1.RequiredSCCAnnotation] + // this one is non-binding + validatedSCCNameHint := pod.ObjectMeta.Annotations[securityv1.ValidatedSCCAnnotation] + if len(requiredSCCName) > 0 && requiredSCCName != validatedSCCNameHint { + return admission.NewForbidden(a, fmt.Errorf("required scc/%v does not match the suggested scc/%v", requiredSCCName, validatedSCCNameHint)) + } + + // compute the context. Mutation is not allowed. ValidatedSCCAnnotation is used as a hint to gain some speed-up.
+ allowedPod, _, validationErrs, err := c.computeSecurityContext(ctx, a, pod, false, false, requiredSCCName, validatedSCCNameHint) + if err != nil { + return admission.NewForbidden(a, err) + } + if allowedPod != nil && apiequality.Semantic.DeepEqual(pod, allowedPod) { + return nil + } + + // we didn't validate against any provider, reject the pod and give the errors for each attempt + klog.V(4).Infof("unable to validate pod %s (generate: %s) in namespace %s against any security context constraint: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) + return admission.NewForbidden(a, fmt.Errorf("unable to validate against any security context constraint: %v", validationErrs)) +} + +// these are the SCCs created by the cluster-kube-apiserver-operator. +// see the list in https://github.com/openshift/cluster-kube-apiserver-operator/blob/3b0218cf9778cbcf2650ad5aa4e01d7b40a2d05e/bindata/bootkube/scc-manifests/0000_20_kube-apiserver-operator_00_scc-restricted.yaml +// if these are not present, the lister isn't really finished listing. +var standardSCCNames = sets.NewString( + "anyuid", + "hostaccess", + "hostmount-anyuid", + "hostnetwork", + "hostnetwork-v2", + "nonroot", + "nonroot-v2", + "privileged", + "restricted", + "restricted-v2", +) + +func requireStandardSCCs(sccs []*securityv1.SecurityContextConstraints, err error) error { + if err != nil { + return err + } + + allCurrentSCCNames := sets.NewString() + for _, curr := range sccs { + allCurrentSCCNames.Insert(curr.Name) + } + + missingSCCs := standardSCCNames.Difference(allCurrentSCCNames) + if len(missingSCCs) == 0 { + return nil + } + + return fmt.Errorf("securitycontextconstraints.security.openshift.io cache is missing %v", strings.Join(missingSCCs.List(), ", ")) +} + +func (c *constraint) areListersSynced() bool { + for _, syncFunc := range c.listersSynced { + if !syncFunc() { + return false + } + } + return true +} + +func (c *constraint) computeSecurityContext( + ctx context.Context, + a admission.Attributes, + pod *coreapi.Pod, + specMutationAllowed, ephemeralContainersMutationAllowed bool, + requiredSCCName, validatedSCCHint string, +) (*coreapi.Pod, string, field.ErrorList, error) { + // get all constraints that are usable by the user + klog.V(4).Infof("getting security context constraints for pod %s (generate: %s) in namespace %s with user info %v", pod.Name, pod.GenerateName, a.GetNamespace(), a.GetUserInfo()) + + err := wait.PollImmediateWithContext(ctx, 1*time.Second, 10*time.Second, func(context.Context) (bool, error) { + return c.areListersSynced(), nil + }) + if err != nil { + return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("securitycontextconstraints.security.openshift.io cache is not synchronized")) + } + + // wait a few seconds until the synchronized list returns all the required SCCs created by the kas-o. + // If this doesn't happen, then indicate which ones are missing. This seems odd, but our CI system suggests that this happens occasionally. + // If the SCCs were all deleted, then no pod will pass SCC admission until the SCCs are recreated, but the kas-o (which recreates them) + // bypasses SCC admission, so this does not create a cycle. 
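+ // Capture the most recent missing-SCC error while polling so that, if the wait below times out, the forbidden response can name exactly which standard SCCs are missing.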
+ var requiredSCCErr error + err = wait.PollImmediateWithContext(ctx, 1*time.Second, 10*time.Second, func(context.Context) (bool, error) { + if requiredSCCErr = requireStandardSCCs(c.sccLister.List(labels.Everything())); requiredSCCErr != nil { + return false, nil + } + return true, nil + }) + if err != nil { + if requiredSCCErr != nil { + return nil, "", nil, admission.NewForbidden(a, requiredSCCErr) + } + return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("securitycontextconstraints.security.openshift.io required check failed oddly")) + } + + var constraints []*securityv1.SecurityContextConstraints + if len(requiredSCCName) > 0 { + requiredSCC, err := c.sccLister.Get(requiredSCCName) + if err != nil { + return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("failed to retrieve the required SCC %q: %w", requiredSCCName, err)) + } + constraints = []*securityv1.SecurityContextConstraints{requiredSCC} + } else { + constraints, err = c.sccLister.List(labels.Everything()) + if err != nil { + return nil, "", nil, admission.NewForbidden(a, err) + } + } + + if len(constraints) == 0 { + return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("no SecurityContextConstraints found in cluster")) + } + sort.Sort(sccsort.ByPriority(constraints)) + + // If mutation is not allowed and validatedSCCHint is provided, check the validated policy first. + // Keep the order the same for everything else + sort.SliceStable(constraints, func(i, j int) bool { + // disregard the ephemeral containers here, the rest of the pod should still + // not get mutated and so we are primarily interested in the SCC that matched previously + if !specMutationAllowed { + if constraints[i].Name == validatedSCCHint { + return true + } + if constraints[j].Name == validatedSCCHint { + return false + } + } + return i < j + }) + + providers, errs := sccmatching.CreateProvidersFromConstraints(ctx, a.GetNamespace(), constraints, c.namespaceLister) + logProviders(pod, providers, errs) + if len(errs) > 0 { + return nil, "", nil, kutilerrors.NewAggregate(errs) + } + + if len(providers) == 0 { + return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("no SecurityContextConstraintsProvider available to validate pod request")) + } + + // all containers in a single pod must validate under a single provider or we will reject the request + var ( + allowedPod *coreapi.Pod + allowingProvider sccmatching.SecurityContextConstraintsProvider + validationErrs field.ErrorList + saUserInfo user.Info + ) + + userInfo := a.GetUserInfo() + if len(pod.Spec.ServiceAccountName) > 0 { + saUserInfo = serviceaccount.UserInfo(a.GetNamespace(), pod.Spec.ServiceAccountName, "") + } + + allowedForUserOrSA := func(provider sccmatching.SecurityContextConstraintsProvider) bool { + sccName := provider.GetSCCName() + sccUsers := provider.GetSCCUsers() + sccGroups := provider.GetSCCGroups() + return sccmatching.ConstraintAppliesTo(ctx, sccName, sccUsers, sccGroups, userInfo, a.GetNamespace(), c.authorizer) || + (saUserInfo != nil && sccmatching.ConstraintAppliesTo(ctx, sccName, sccUsers, sccGroups, saUserInfo, a.GetNamespace(), c.authorizer)) + } + + appliesToPod := func(provider sccmatching.SecurityContextConstraintsProvider, pod *coreapi.Pod) (podCopy *coreapi.Pod, errs field.ErrorList) { + podCopy = pod.DeepCopy() + if errs := sccmatching.AssignSecurityContext(provider, podCopy, field.NewPath(fmt.Sprintf("provider %s: ", provider.GetSCCName()))); len(errs) > 0 { + return nil, errs + } + return podCopy, nil + } + + var ( + restrictedSCCProvider 
sccmatching.SecurityContextConstraintsProvider + restrictedV2SCCProvider sccmatching.SecurityContextConstraintsProvider + provider sccmatching.SecurityContextConstraintsProvider + denied = []string{} + failures = map[string]string{} + i int + ) +loop: + for i, provider = range providers { + switch provider.GetSCCName() { + case "restricted": + restrictedSCCProvider = providers[i] + case "restricted-v2": + restrictedV2SCCProvider = providers[i] + } + + if !allowedForUserOrSA(provider) { + denied = append(denied, provider.GetSCCName()) + // this will add every attempted security context constraint, in order, to the failure list + validationErrs = append(validationErrs, + field.Forbidden( + field.NewPath(fmt.Sprintf("provider %q", provider.GetSCCName())), + "not usable by user or serviceaccount", + ), + ) + + continue + } + + podCopy, errs := appliesToPod(provider, pod) + if len(errs) > 0 { + validationErrs = append(validationErrs, errs...) + failures[provider.GetSCCName()] = errs.ToAggregate().Error() + continue + } + + // the entire pod validated + switch { + case specMutationAllowed: + // if mutation is allowed, use the first found SCC that allows the pod. + // This behavior is different from Kubernetes which tries to search a non-mutating provider + // even on creating. We prefer the most restrictive SCC in this case even if it mutates a pod. + allowedPod = podCopy + allowingProvider = provider + klog.V(5).Infof("pod %s (generate: %s) validated against provider %s with mutation", pod.Name, pod.GenerateName, provider.GetSCCName()) + break loop + case ephemeralContainersMutationAllowed: + podCopyCopy := podCopy.DeepCopy() + // check if, possibly, only the ephemeral containers were mutated + podCopyCopy.Spec.EphemeralContainers = pod.Spec.EphemeralContainers + if apiequality.Semantic.DeepEqual(pod, podCopyCopy) { + allowedPod = podCopy + allowingProvider = provider + klog.V(5).Infof("pod %s (generate: %s) validated against provider %s with ephemeralContainers mutation", pod.Name, pod.GenerateName, provider.GetSCCName()) + break loop + } + klog.V(5).Infof("pod %s (generate: %s) validated against provider %s, but required pod mutation outside ephemeralContainers, skipping", pod.Name, pod.GenerateName, provider.GetSCCName()) + failures[provider.GetSCCName()] = "failed final validation after mutating admission" + case apiequality.Semantic.DeepEqual(pod, podCopy): + // if we don't allow mutation, only use the validated pod if it didn't require any spec changes + allowedPod = podCopy + allowingProvider = provider + klog.V(5).Infof("pod %s (generate: %s) validated against provider %s without mutation", pod.Name, pod.GenerateName, provider.GetSCCName()) + break loop + default: + klog.V(5).Infof("pod %s (generate: %s) validated against provider %s, but required mutation, skipping", pod.Name, pod.GenerateName, provider.GetSCCName()) + failures[provider.GetSCCName()] = "failed final validation after mutating admission" + } + } + + // if we have restricted-v2, and we're not allowed (this means restricted-v2 did not match) and the user cannot use restricted-v1 + // then we should check to see if restricted-v1 would allow the pod. If so, prepend a specific failure message.
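+ // Illustrative case (editorial example, not from the upstream source): a pod that sets allowPrivilegeEscalation=true fails restricted-v2 on its merits; if the user is also denied scc/restricted (which tolerates privilege escalation), the check below prepends a hint that scc/restricted would have admitted the pod.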
+ userCannotUseForRestricted := sets.NewString(denied...).Has("restricted") + hasRestrictedV2 := restrictedV2SCCProvider != nil + isAllowed := allowingProvider != nil + if hasRestrictedV2 && !isAllowed && userCannotUseForRestricted { + // restrictedSCCProvider is never nil because the loop above only adds "restricted" to the denied list if it found "restricted" + _, restrictedErrs := appliesToPod(restrictedSCCProvider, pod) + if len(restrictedErrs) == 0 { + // this means that restricted-v1 works, so we should indicate that the pod would have been admitted otherwise + validationErrs = append(validationErrs, + field.Forbidden( + field.NewPath("no access to scc/restricted"), + "the pod fails to validate against the `restricted-v2` security context constraint, "+ + "but would validate successfully against the `restricted` security context constraint", + ), + ) + } + } + + // add audit annotations + if specMutationAllowed { + // find next provider that was not chosen + var nextNotChosenProvider sccmatching.SecurityContextConstraintsProvider + for _, provider := range providers[i+1:] { + if !allowedForUserOrSA(provider) { + continue + } + if _, errs := appliesToPod(provider, pod); len(errs) == 0 { + nextNotChosenProvider = provider + break + } + } + + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/denied", strings.Join(denied, ",")) + for sccName, reason := range failures { + a.AddAnnotation(fmt.Sprintf("securitycontextconstraints.admission.openshift.io/too-restrictive-%s", sccName), reason) + } + + if allowingProvider != nil && nextNotChosenProvider != nil { + chosen := allowingProvider.GetSCC() + next := nextNotChosenProvider.GetSCC() + if chosen != nil && next != nil { + _, reason := sccsort.ByPriority([]*securityv1.SecurityContextConstraints{chosen, next}).LessWithReason(0, 1) + if len(reason) == 0 { + reason = "unknown" + } else { + reason = fmt.Sprintf("%q is most restrictive, not denied, and chosen over %q because %q %s", chosen.Name, next.Name, chosen.Name, reason) + } + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/reason", reason) + } + } else if allowingProvider != nil { + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/reason", fmt.Sprintf("%q is the only one not too restrictive and not denied", allowingProvider.GetSCCName())) + } else if len(failures) == 0 { + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/reason", "all denied") + } else { + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/reason", "all too restrictive or denied") + } + } else if len(validatedSCCHint) != 0 && (allowingProvider == nil || allowingProvider.GetSCCName() != validatedSCCHint) { + if reason, ok := failures[validatedSCCHint]; ok { + a.AddAnnotation(fmt.Sprintf("securitycontextconstraints.admission.openshift.io/too-restrictive-%s", validatedSCCHint), reason) + } else { + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/denied-validation", fmt.Sprintf("originally chosen %q got denied in final validation after mutating admission", validatedSCCHint)) + } + + if allowingProvider != nil { + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/reason-validation", fmt.Sprintf("originally chosen %q did not pass final validation after mutating admission, but %q did instead", validatedSCCHint, allowingProvider.GetSCCName())) + } else { + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/denied-validation", fmt.Sprintf("originally chosen %q got denied in final validation
after mutating admission, and no other matched", validatedSCCHint)) + } + } + + if allowedPod == nil || allowingProvider == nil { + return nil, "", validationErrs, nil + } + + if !specMutationAllowed { + // the finally chosen SCC. Note that we are not allowed to set an annotation multiple times, hence only for !specMutationAllowed + a.AddAnnotation("securitycontextconstraints.admission.openshift.io/chosen", allowingProvider.GetSCCName()) + } + + return allowedPod, allowingProvider.GetSCCName(), validationErrs, nil +} + +var ignoredSubresources = sets.NewString( + "exec", + "attach", + "binding", + "eviction", + "log", + "portforward", + "proxy", + "status", +) + +var ignoredAnnotations = sets.NewString( + "k8s.ovn.org/pod-networks", +) + +func shouldIgnore(a admission.Attributes) (bool, error) { + if a.GetResource().GroupResource() != coreapi.Resource("pods") { + return true, nil + } + + if subresource := a.GetSubresource(); len(subresource) != 0 && ignoredSubresources.Has(subresource) { + return true, nil + } + + pod, ok := a.GetObject().(*coreapi.Pod) + // if we can't convert then fail closed since we've already checked that this is supposed to be a pod object. + // this shouldn't normally happen during admission but could happen if an integrator passes a versioned + // pod object rather than an internal object. + if !ok { + return false, admission.NewForbidden(a, fmt.Errorf("object was marked as kind pod but was unable to be converted: %v", a.GetObject())) + } + // ignore all Windows pods + // TODO: This can be expanded to other OS'es later if needed + if pod.Spec.OS != nil && pod.Spec.OS.Name == coreapi.Windows { + return true, nil + } + + if a.GetOperation() == admission.Update { + oldPod, ok := a.GetOldObject().(*coreapi.Pod) + if !ok { + return false, admission.NewForbidden(a, fmt.Errorf("object was marked as kind pod but was unable to be converted: %v", a.GetOldObject())) + } + + // never ignore any spec changes + if !kapihelper.Semantic.DeepEqual(pod.Spec, oldPod.Spec) { + return false, nil + } + + // see if we are only doing meta changes that should be ignored during admission + // for example, the OVN controller adds informative networking annotations that shouldn't cause the pod to go through admission again + if shouldIgnoreMetaChanges(pod, oldPod) { + return true, nil + } + } + + return false, nil +} + +func shouldIgnoreMetaChanges(newPod, oldPod *coreapi.Pod) bool { + // check if we're adding or changing only annotations from the ignore list + for key, newVal := range newPod.ObjectMeta.Annotations { + if oldVal, ok := oldPod.ObjectMeta.Annotations[key]; ok && newVal == oldVal { + continue + } + + if !ignoredAnnotations.Has(key) { + return false + } + } + + // check if we're removing only annotations from the ignore list + for key := range oldPod.ObjectMeta.Annotations { + if _, ok := newPod.ObjectMeta.Annotations[key]; ok { + continue + } + + if !ignoredAnnotations.Has(key) { + return false + } + } + + newPodCopy := newPod.DeepCopyObject() + newPodCopyMeta, err := meta.Accessor(newPodCopy) + if err != nil { + return false + } + newPodCopyMeta.SetAnnotations(oldPod.ObjectMeta.Annotations) + + // see if we are only updating the ownerRef. Garbage collection does this + // and we should allow it in general, since you had the power to update and the power to delete. 
+ // The worst that happens is that you delete something, but you aren't controlling the privileged object itself + res := rbacregistry.IsOnlyMutatingGCFields(newPodCopy, oldPod, kapihelper.Semantic) + + return res +} + +// SetSecurityInformers implements WantsSecurityInformer interface for constraint. +func (c *constraint) SetSecurityInformers(informers securityv1informer.SecurityContextConstraintsInformer) { + c.sccLister = informers.Lister() + c.listersSynced = append(c.listersSynced, informers.Informer().HasSynced) +} + +func (c *constraint) SetExternalKubeInformerFactory(informers informers.SharedInformerFactory) { + c.namespaceLister = informers.Core().V1().Namespaces().Lister() + c.listersSynced = append(c.listersSynced, informers.Core().V1().Namespaces().Informer().HasSynced) +} + +func (c *constraint) SetAuthorizer(authorizer authorizer.Authorizer) { + c.authorizer = authorizer +} + +// ValidateInitialization implements InitializationValidator interface for constraint. +func (c *constraint) ValidateInitialization() error { + if c.sccLister == nil { + return fmt.Errorf("%s requires an sccLister", PluginName) + } + if c.listersSynced == nil { + return fmt.Errorf("%s requires an sccSynced", PluginName) + } + if c.namespaceLister == nil { + return fmt.Errorf("%s requires a namespaceLister", PluginName) + } + if c.authorizer == nil { + return fmt.Errorf("%s requires an authorizer", PluginName) + } + return nil +} + +// logProviders logs what providers were found for the pod as well as any errors that were encountered +// while creating providers. +func logProviders(pod *coreapi.Pod, providers []sccmatching.SecurityContextConstraintsProvider, providerCreationErrs []error) { + names := make([]string, len(providers)) + for i, p := range providers { + names[i] = p.GetSCCName() + } + klog.V(4).Infof("validating pod %s (generate: %s) against providers %s", pod.Name, pod.GenerateName, strings.Join(names, ",")) + + for _, err := range providerCreationErrs { + klog.V(2).Infof("provider creation error: %v", err) + } +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/intializers.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/intializers.go new file mode 100644 index 0000000000000..287fe08b870bc --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/intializers.go @@ -0,0 +1,28 @@ +package sccadmission + +import ( + "k8s.io/apiserver/pkg/admission" + + securityv1informer "github.com/openshift/client-go/security/informers/externalversions/security/v1" +) + +func NewInitializer(sccInformer securityv1informer.SecurityContextConstraintsInformer) admission.PluginInitializer { + return &localInitializer{sccInformer: sccInformer} +} + +type WantsSecurityInformer interface { + SetSecurityInformers(securityv1informer.SecurityContextConstraintsInformer) + admission.InitializationValidator +} + +type localInitializer struct { + sccInformer securityv1informer.SecurityContextConstraintsInformer +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsSecurityInformer); ok { + wants.SetSecurityInformers(i.sccInformer) + } +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/scc_exec.go 
b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/scc_exec.go new file mode 100644 index 0000000000000..666bb14a22ddb --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/scc_exec.go @@ -0,0 +1,108 @@ +package sccadmission + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + coreapi "k8s.io/kubernetes/pkg/apis/core" + coreapiv1conversions "k8s.io/kubernetes/pkg/apis/core/v1" + + securityv1informers "github.com/openshift/client-go/security/informers/externalversions/security/v1" +) + +func RegisterSCCExecRestrictions(plugins *admission.Plugins) { + plugins.Register("security.openshift.io/SCCExecRestrictions", + func(config io.Reader) (admission.Interface, error) { + execAdmitter := NewSCCExecRestrictions() + return execAdmitter, nil + }) +} + +var ( + _ = initializer.WantsAuthorizer(&sccExecRestrictions{}) + _ = initializer.WantsExternalKubeClientSet(&sccExecRestrictions{}) + _ = WantsSecurityInformer(&sccExecRestrictions{}) + _ = admission.ValidationInterface(&sccExecRestrictions{}) +) + +// sccExecRestrictions is an implementation of admission.ValidationInterface which says no to a pod/exec on +// a pod that the user would not be allowed to create +type sccExecRestrictions struct { + *admission.Handler + constraintAdmission *constraint + client kubernetes.Interface +} + +func (d *sccExecRestrictions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) { + if a.GetOperation() != admission.Connect { + return nil + } + if a.GetResource().GroupResource() != coreapi.Resource("pods") { + return nil + } + if a.GetSubresource() != "attach" && a.GetSubresource() != "exec" { + return nil + } + + pod, err := d.client.CoreV1().Pods(a.GetNamespace()).Get(context.TODO(), a.GetName(), metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + return admission.NewNotFound(a) + case err != nil: + return admission.NewForbidden(a, fmt.Errorf("failed to get pod: %v", err)) + } + + // we have to convert to the internal pod because admission uses internal types for now + internalPod := &coreapi.Pod{} + if err := coreapiv1conversions.Convert_v1_Pod_To_core_Pod(pod, internalPod, nil); err != nil { + return admission.NewForbidden(a, err) + } + + // TODO, if we want to actually limit who can use which service account, then we'll need to add logic here to make sure that + // we're allowed to use the SA the pod is using. Otherwise, user-A creates pod and user-B (who can't use the SA) can exec into it. + createAttributes := admission.NewAttributesRecord(internalPod, nil, coreapi.Kind("Pod").WithVersion(""), a.GetNamespace(), a.GetName(), a.GetResource(), "", admission.Create, nil, false, a.GetUserInfo()) + // call SCC.Admit instead of SCC.Validate because we accept that a different SCC is chosen. SCC.Validate would require + // that the chosen SCC (stored in the "openshift.io/scc" annotation) does not change. 
+ if err := d.constraintAdmission.Admit(ctx, createAttributes, o); err != nil { + return admission.NewForbidden(a, fmt.Errorf("%s operation is not allowed because the pod's security context exceeds your permissions: %v", a.GetSubresource(), err)) + } + + return nil +} + +// NewSCCExecRestrictions creates a new admission controller that denies an exec operation on a privileged pod +func NewSCCExecRestrictions() *sccExecRestrictions { + return &sccExecRestrictions{ + Handler: admission.NewHandler(admission.Connect), + constraintAdmission: NewConstraint(), + } +} + +func (d *sccExecRestrictions) SetExternalKubeClientSet(c kubernetes.Interface) { + d.client = c +} + +func (d *sccExecRestrictions) SetExternalKubeInformerFactory(informers informers.SharedInformerFactory) { + d.constraintAdmission.SetExternalKubeInformerFactory(informers) +} + +func (d *sccExecRestrictions) SetSecurityInformers(informers securityv1informers.SecurityContextConstraintsInformer) { + d.constraintAdmission.SetSecurityInformers(informers) +} + +func (d *sccExecRestrictions) SetAuthorizer(authorizer authorizer.Authorizer) { + d.constraintAdmission.SetAuthorizer(authorizer) +} + +// ValidateInitialization validates that sccExecRestrictions is fully initialized by delegating to the wrapped constraint plugin +func (d *sccExecRestrictions) ValidateInitialization() error { + return d.constraintAdmission.ValidateInitialization() +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go new file mode 100644 index 0000000000000..09a084483d631 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go @@ -0,0 +1,419 @@ +package sccmatching + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/klog/v2" + kapi "k8s.io/kubernetes/pkg/apis/core" + podhelpers "k8s.io/kubernetes/pkg/apis/core/pods" + + "github.com/openshift/api/security" + securityv1 "github.com/openshift/api/security/v1" + sccsort "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort" + securityv1listers "github.com/openshift/client-go/security/listers/security/v1" + "github.com/openshift/library-go/pkg/security/uid" +) + +type SCCMatcher interface { + FindApplicableSCCs(ctx context.Context, namespace string, user ...user.Info) ([]*securityv1.SecurityContextConstraints, error) +} + +type defaultSCCMatcher struct { + cache securityv1listers.SecurityContextConstraintsLister + authorizer authorizer.Authorizer +} + +func NewDefaultSCCMatcher(c securityv1listers.SecurityContextConstraintsLister, authorizer authorizer.Authorizer) SCCMatcher { + return &defaultSCCMatcher{cache: c, authorizer: authorizer} +} + +// FindApplicableSCCs implements the SCCMatcher interface. +// It finds all SCCs that the subjects in the `users` argument may use for the given `namespace`. +// If `users` is omitted, all SCCs are returned and `namespace` is ignored. +// The returned SCCs are sorted by priority.
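+// Callers depend on this ordering: the admission plugin walks the result front to back, so the first SCC that admits the pod is the one chosen.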
+func (d *defaultSCCMatcher) FindApplicableSCCs(ctx context.Context, namespace string, users ...user.Info) ([]*securityv1.SecurityContextConstraints, error) { + var matchedConstraints []*securityv1.SecurityContextConstraints + constraints, err := d.cache.List(labels.Everything()) + if err != nil { + return nil, err + } + + // filter out SCCs if we got some users, leave as is if not + if len(users) == 0 { + matchedConstraints = constraints + } else { + for _, constraint := range constraints { + for _, user := range users { + if ConstraintAppliesTo(ctx, constraint.Name, constraint.Users, constraint.Groups, user, namespace, d.authorizer) { + matchedConstraints = append(matchedConstraints, constraint) + break + } + } + } + } + + sort.Sort(sccsort.ByPriority(matchedConstraints)) + + return matchedConstraints, nil +} + +// authorizedForSCC returns true if info is authorized to perform the "use" verb on the SCC resource. +func authorizedForSCC(ctx context.Context, sccName string, info user.Info, namespace string, a authorizer.Authorizer) bool { + // check against the namespace that the pod is being created in to allow per-namespace SCC grants. + attr := authorizer.AttributesRecord{ + User: info, + Verb: "use", + Namespace: namespace, + Name: sccName, + APIGroup: security.GroupName, + Resource: "securitycontextconstraints", + ResourceRequest: true, + } + decision, reason, err := a.Authorize(ctx, attr) + if err != nil { + klog.V(5).Infof("cannot authorize for SCC: %v %q %v", decision, reason, err) + return false + } + return decision == authorizer.DecisionAllow +} + +// ConstraintAppliesTo inspects the constraint's users and groups against the userInfo to determine + // if it is usable by the userInfo. +// Anything we do here needs to work with a deny authorizer so the choices are limited to SAR / Authorizer +func ConstraintAppliesTo(ctx context.Context, sccName string, sccUsers, sccGroups []string, userInfo user.Info, namespace string, a authorizer.Authorizer) bool { + for _, user := range sccUsers { + if userInfo.GetName() == user { + return true + } + } + for _, userGroup := range userInfo.GetGroups() { + if constraintSupportsGroup(userGroup, sccGroups) { + return true + } + } + if a != nil { + return authorizedForSCC(ctx, sccName, userInfo, namespace, a) + } + return false +} + +// AssignSecurityContext creates a security context for each container in the pod +// and validates that the sc falls within the scc constraints. All containers must validate against +// the same scc or the pod is not considered valid. +func AssignSecurityContext(provider SecurityContextConstraintsProvider, pod *kapi.Pod, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + psc, generatedAnnotations, err := provider.CreatePodSecurityContext(pod) + if err != nil { + errs = append(errs, field.Invalid(fldPath.Child("spec", "securityContext"), pod.Spec.SecurityContext, err.Error())) + } + + pod.Spec.SecurityContext = psc + pod.Annotations = generatedAnnotations + errs = append(errs, provider.ValidatePodSecurityContext(pod, fldPath.Child("spec", "securityContext"))...) + + podhelpers.VisitContainersWithPath(&pod.Spec, fldPath, func(container *kapi.Container, path *field.Path) bool { + errs = append(errs, assignContainerSecurityContext(provider, pod, container, path)...)
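+ // returning true keeps the visitor iterating, so violations from every container are collected rather than just the first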
+ return true + }) + + if len(errs) > 0 { + return errs + } + + return nil +} + +func assignContainerSecurityContext(provider SecurityContextConstraintsProvider, pod *kapi.Pod, container *kapi.Container, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + sc, err := provider.CreateContainerSecurityContext(pod, container) + if err != nil { + errs = append(errs, field.Invalid(fldPath, "", err.Error())) + return errs + } + container.SecurityContext = sc + errs = append(errs, provider.ValidateContainerSecurityContext(pod, container, fldPath)...) + + if len(errs) > 0 { + return errs + } + + return nil +} + +// constraintSupportsGroup checks that group is in constraintGroups. +func constraintSupportsGroup(group string, constraintGroups []string) bool { + for _, g := range constraintGroups { + if g == group { + return true + } + } + return false +} + +// CreateProvidersFromConstraints creates providers from the constraints supplied, including +// looking up pre-allocated values if necessary using the pod's namespace. +func CreateProvidersFromConstraints(ctx context.Context, namespaceName string, sccs []*securityv1.SecurityContextConstraints, namespaceLister corev1listers.NamespaceLister) ([]SecurityContextConstraintsProvider, []error) { + var ( + // namespace is declared here for reuse but we will not fetch it unless required by the matched constraints + namespace *corev1.Namespace + // collected providers + providers []SecurityContextConstraintsProvider + // collected errors to return + errs []error + ) + + // because we're willing to wait for 10s on a single request, we only use the namespace lister, not a live lookup. + var lastErr error + err := wait.PollImmediateWithContext(ctx, 1*time.Second, 10*time.Second, func(ctx context.Context) (bool, error) { + namespace, lastErr = namespaceLister.Get(namespaceName) + if lastErr != nil { + return false, nil + } + + if _, ok := namespace.GetAnnotations()[securityv1.UIDRangeAnnotation]; !ok { + lastErr = fmt.Errorf("unable to find annotation %s", securityv1.UIDRangeAnnotation) + return false, nil + } + + if _, ok := namespace.GetAnnotations()[securityv1.MCSAnnotation]; !ok { + lastErr = fmt.Errorf("unable to find annotation %s", securityv1.MCSAnnotation) + return false, nil + } + + return true, nil + }) + if err != nil { + if lastErr != nil { + return nil, []error{fmt.Errorf("error fetching namespace %q: %w", namespaceName, lastErr)} + } + return nil, []error{fmt.Errorf("error fetching namespace %q: %w", namespaceName, err)} + } + + // set pre-allocated values on constraints + for _, constraint := range sccs { + var ( + provider SecurityContextConstraintsProvider + err error + ) + provider, err = CreateProviderFromConstraint(namespace, constraint) + if err != nil { + errs = append(errs, err) + continue + } + providers = append(providers, provider) + } + return providers, errs +} + +// CreateProviderFromConstraint creates a SecurityContextConstraintProvider from a SecurityContextConstraint +func CreateProviderFromConstraint(namespace *corev1.Namespace, constraint *securityv1.SecurityContextConstraints) (SecurityContextConstraintsProvider, error) { + var err error + + // Make a copy of the constraint so we don't mutate the store's cache + constraint = constraint.DeepCopy() + + // Resolve the values from the namespace + if requiresPreAllocatedUIDRange(constraint) { + constraint.RunAsUser.UIDRangeMin, constraint.RunAsUser.UIDRangeMax, err = getPreallocatedUIDRange(namespace) + if err != nil { + return nil, fmt.Errorf("unable to find 
pre-allocated uid annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + } + } + if requiresPreAllocatedSELinuxLevel(constraint) { + var level string + if level, err = getPreallocatedLevel(namespace); err != nil { + return nil, fmt.Errorf("unable to find pre-allocated mcs annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + } + + if constraint.SELinuxContext.SELinuxOptions == nil { + constraint.SELinuxContext.SELinuxOptions = &corev1.SELinuxOptions{} + } + constraint.SELinuxContext.SELinuxOptions.Level = level + } + if requiresPreallocatedFSGroup(constraint) { + fsGroup, err := getPreallocatedFSGroup(namespace) + if err != nil { + return nil, fmt.Errorf("unable to find pre-allocated group annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + } + constraint.FSGroup.Ranges = fsGroup + } + if requiresPreallocatedSupplementalGroups(constraint) { + supplementalGroups, err := getPreallocatedSupplementalGroups(namespace) + if err != nil { + return nil, fmt.Errorf("unable to find pre-allocated group annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + } + constraint.SupplementalGroups.Ranges = supplementalGroups + } + + // Create the provider + provider, err := NewSimpleProvider(constraint) + if err != nil { + return nil, fmt.Errorf("error creating provider for SCC %s in namespace %s: %v", constraint.Name, namespace.GetName(), err) + } + return provider, nil +} + +// getPreallocatedUIDRange retrieves the annotated value from the namespace, splits it to make +// the min/max and formats the data into the necessary types for the strategy options. +func getPreallocatedUIDRange(ns *corev1.Namespace) (*int64, *int64, error) { + annotationVal, ok := ns.Annotations[securityv1.UIDRangeAnnotation] + if !ok { + return nil, nil, fmt.Errorf("unable to find annotation %s", securityv1.UIDRangeAnnotation) + } + if len(annotationVal) == 0 { + return nil, nil, fmt.Errorf("found annotation %s but it was empty", securityv1.UIDRangeAnnotation) + } + uidBlock, err := uid.ParseBlock(annotationVal) + if err != nil { + return nil, nil, err + } + + var min int64 = int64(uidBlock.Start) + var max int64 = int64(uidBlock.End) + klog.V(4).Infof("got preallocated values for min: %d, max: %d for uid range in namespace %s", min, max, ns.Name) + return &min, &max, nil +} + +// getPreallocatedLevel gets the annotated value from the namespace. +func getPreallocatedLevel(ns *corev1.Namespace) (string, error) { + level, ok := ns.Annotations[securityv1.MCSAnnotation] + if !ok { + return "", fmt.Errorf("unable to find annotation %s", securityv1.MCSAnnotation) + } + if len(level) == 0 { + return "", fmt.Errorf("found annotation %s but it was empty", securityv1.MCSAnnotation) + } + klog.V(4).Infof("got preallocated value for level: %s for selinux options in namespace %s", level, ns.Name) + return level, nil +} + +// getSupplementalGroupsAnnotation provides a backwards compatible way to get supplemental groups +// annotations from a namespace by looking for SupplementalGroupsAnnotation and falling back to +// UIDRangeAnnotation if it is not found. 
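+// A value like "1000000000/10000" (start/size notation) is later parsed by uid.ParseBlock into the block 1000000000-1000009999.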
+func getSupplementalGroupsAnnotation(ns *corev1.Namespace) (string, error) { + groups, ok := ns.Annotations[securityv1.SupplementalGroupsAnnotation] + if !ok { + klog.V(4).Infof("unable to find supplemental group annotation %s falling back to %s", securityv1.SupplementalGroupsAnnotation, securityv1.UIDRangeAnnotation) + + groups, ok = ns.Annotations[securityv1.UIDRangeAnnotation] + if !ok { + return "", fmt.Errorf("unable to find supplemental group or uid annotation for namespace %s", ns.Name) + } + } + + if len(groups) == 0 { + return "", fmt.Errorf("unable to find groups using %s and %s annotations", securityv1.SupplementalGroupsAnnotation, securityv1.UIDRangeAnnotation) + } + return groups, nil +} + +// getPreallocatedFSGroup gets the annotated value from the namespace. +func getPreallocatedFSGroup(ns *corev1.Namespace) ([]securityv1.IDRange, error) { + groups, err := getSupplementalGroupsAnnotation(ns) + if err != nil { + return nil, err + } + klog.V(4).Infof("got preallocated value for groups: %s in namespace %s", groups, ns.Name) + + blocks, err := parseSupplementalGroupAnnotation(groups) + if err != nil { + return nil, err + } + return []securityv1.IDRange{ + { + Min: int64(blocks[0].Start), + Max: int64(blocks[0].Start), + }, + }, nil +} + +// getPreallocatedSupplementalGroups gets the annotated value from the namespace. +func getPreallocatedSupplementalGroups(ns *corev1.Namespace) ([]securityv1.IDRange, error) { + groups, err := getSupplementalGroupsAnnotation(ns) + if err != nil { + return nil, err + } + klog.V(4).Infof("got preallocated value for groups: %s in namespace %s", groups, ns.Name) + + blocks, err := parseSupplementalGroupAnnotation(groups) + if err != nil { + return nil, err + } + + idRanges := []securityv1.IDRange{} + for _, block := range blocks { + rng := securityv1.IDRange{ + Min: int64(block.Start), + Max: int64(block.End), + } + idRanges = append(idRanges, rng) + } + return idRanges, nil +} + +// parseSupplementalGroupAnnotation parses the group annotation into blocks. +func parseSupplementalGroupAnnotation(groups string) ([]uid.Block, error) { + blocks := []uid.Block{} + segments := strings.Split(groups, ",") + for _, segment := range segments { + block, err := uid.ParseBlock(segment) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, fmt.Errorf("no blocks parsed from annotation %s", groups) + } + return blocks, nil +} + +// requiresPreAllocatedUIDRange returns true if the strategy is must run in range and the min or max +// is not set. +func requiresPreAllocatedUIDRange(constraint *securityv1.SecurityContextConstraints) bool { + if constraint.RunAsUser.Type != securityv1.RunAsUserStrategyMustRunAsRange { + return false + } + return constraint.RunAsUser.UIDRangeMin == nil && constraint.RunAsUser.UIDRangeMax == nil +} + +// requiresPreAllocatedSELinuxLevel returns true if the strategy is must run as and the level is not set. +func requiresPreAllocatedSELinuxLevel(constraint *securityv1.SecurityContextConstraints) bool { + if constraint.SELinuxContext.Type != securityv1.SELinuxStrategyMustRunAs { + return false + } + if constraint.SELinuxContext.SELinuxOptions == nil { + return true + } + return constraint.SELinuxContext.SELinuxOptions.Level == "" +} + +// requiresPreallocatedSupplementalGroups returns true if the strategy is must run as and there is no +// range specified.
+func requiresPreallocatedSupplementalGroups(constraint *securityv1.SecurityContextConstraints) bool { + if constraint.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyMustRunAs { + return false + } + return len(constraint.SupplementalGroups.Ranges) == 0 +} + +// requiresPreallocatedFSGroup returns true if the strategy is must run as and there is no +// range specified. +func requiresPreallocatedFSGroup(constraint *securityv1.SecurityContextConstraints) bool { + if constraint.FSGroup.Type != securityv1.FSGroupStrategyMustRunAs { + return false + } + return len(constraint.FSGroup.Ranges) == 0 +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go new file mode 100644 index 0000000000000..6acd3913d2c2d --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go @@ -0,0 +1,546 @@ +package sccmatching + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/securitycontext" + + securityv1 "github.com/openshift/api/security/v1" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/seccomp" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" + podhelpers "k8s.io/kubernetes/pkg/apis/core/pods" +) + +// used to pass in the field being validated for reusable group strategies so they +// can create informative error messages. +const ( + fsGroupField = "fsGroup" + supplementalGroupsField = "supplementalGroups" +) + +// simpleProvider is the default implementation of SecurityContextConstraintsProvider +type simpleProvider struct { + scc *securityv1.SecurityContextConstraints + runAsUserStrategy user.RunAsUserSecurityContextConstraintsStrategy + seLinuxStrategy selinux.SELinuxSecurityContextConstraintsStrategy + fsGroupStrategy group.GroupSecurityContextConstraintsStrategy + supplementalGroupStrategy group.GroupSecurityContextConstraintsStrategy + capabilitiesStrategy capabilities.CapabilitiesSecurityContextConstraintsStrategy + seccompStrategy seccomp.SeccompStrategy + sysctlsStrategy sysctl.SysctlsStrategy +} + +// ensure we implement the interface correctly. +var _ SecurityContextConstraintsProvider = &simpleProvider{} + +// NewSimpleProvider creates a new SecurityContextConstraintsProvider instance. 
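+// It builds one strategy per constrained field (run-as-user, SELinux, fsGroup, supplemental groups, capabilities, seccomp, sysctls) and fails if any strategy cannot be constructed.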
+func NewSimpleProvider(scc *securityv1.SecurityContextConstraints) (SecurityContextConstraintsProvider, error) { + if scc == nil { + return nil, fmt.Errorf("NewSimpleProvider requires a SecurityContextConstraints") + } + + userStrat, err := createUserStrategy(&scc.RunAsUser) + if err != nil { + return nil, err + } + + seLinuxStrat, err := createSELinuxStrategy(&scc.SELinuxContext) + if err != nil { + return nil, err + } + + fsGroupStrat, err := createFSGroupStrategy(&scc.FSGroup) + if err != nil { + return nil, err + } + + supGroupStrat, err := createSupplementalGroupStrategy(&scc.SupplementalGroups) + if err != nil { + return nil, err + } + + capStrat, err := createCapabilitiesStrategy(scc.DefaultAddCapabilities, scc.RequiredDropCapabilities, scc.AllowedCapabilities) + if err != nil { + return nil, err + } + + seccompStrat, err := createSeccompStrategy(scc.SeccompProfiles) + if err != nil { + return nil, err + } + + sysctlsStrat, err := createSysctlsStrategy(sysctl.SafeSysctlAllowlist(), scc.AllowedUnsafeSysctls, scc.ForbiddenSysctls) + if err != nil { + return nil, err + } + + return &simpleProvider{ + scc: scc, + runAsUserStrategy: userStrat, + seLinuxStrategy: seLinuxStrat, + fsGroupStrategy: fsGroupStrat, + supplementalGroupStrategy: supGroupStrat, + capabilitiesStrategy: capStrat, + seccompStrategy: seccompStrat, + sysctlsStrategy: sysctlsStrat, + }, nil +} + +// Create a PodSecurityContext based on the given constraints. If a setting is already set +// on the PodSecurityContext it will not be changed. Validate should be used after the context +// is created to ensure it complies with the required restrictions. +func (s *simpleProvider) CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, map[string]string, error) { + sc := securitycontext.NewPodSecurityContextMutator(pod.Spec.SecurityContext) + + annotationsCopy := copySS(pod.Annotations) + + if sc.SupplementalGroups() == nil { + supGroups, err := s.supplementalGroupStrategy.Generate(pod) + if err != nil { + return nil, nil, err + } + sc.SetSupplementalGroups(supGroups) + } + + if sc.FSGroup() == nil { + fsGroup, err := s.fsGroupStrategy.GenerateSingle(pod) + if err != nil { + return nil, nil, err + } + sc.SetFSGroup(fsGroup) + } + + if sc.SELinuxOptions() == nil { + seLinux, err := s.seLinuxStrategy.Generate(pod, nil) + if err != nil { + return nil, nil, err + } + sc.SetSELinuxOptions(seLinux) + } + + // This is only generated on the pod level. Containers inherit the pod's profile. If the + // container has a specific profile set then it will be caught in the validation step. + seccompProfile, err := s.seccompStrategy.Generate(pod.Annotations, pod) + if err != nil { + return nil, nil, err + } + if seccompProfile != "" { + if annotationsCopy == nil { + annotationsCopy = map[string]string{} + } + annotationsCopy[api.SeccompPodAnnotationKey] = seccompProfile + sc.SetSeccompProfile(seccompFieldForAnnotation(seccompProfile)) + } + + return sc.PodSecurityContext(), annotationsCopy, nil +} + +// Create a SecurityContext based on the given constraints. If a setting is already set on the +// container's security context then it will not be changed. Validation should be used after +// the context is created to ensure it complies with the required restrictions. 
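+// The effective-context mutator below overlays the container's settings on the pod-level security context, so pod-level values such as runAsUser act as defaults the container may override.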
+func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, error) { + sc := securitycontext.NewEffectiveContainerSecurityContextMutator( + securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext), + securitycontext.NewContainerSecurityContextMutator(container.SecurityContext), + ) + if sc.RunAsUser() == nil { + uid, err := s.runAsUserStrategy.Generate(pod, container) + if err != nil { + return nil, err + } + sc.SetRunAsUser(uid) + } + + if sc.SELinuxOptions() == nil { + seLinux, err := s.seLinuxStrategy.Generate(pod, container) + if err != nil { + return nil, err + } + sc.SetSELinuxOptions(seLinux) + } + + // if we're using the non-root strategy set the marker that this container should not be + // run as root which will signal to the kubelet to do a final check either on the runAsUser + // or, if runAsUser is not set, the image + // Alternatively, also set the RunAsNonRoot to true in case the UID value is non-nil and non-zero + // to more easily satisfy the requirements of upstream PodSecurity admission "restricted" profile + // which currently requires all containers to have runAsNonRoot set to true, or to have this set + // in the whole pod's security context + if sc.RunAsNonRoot() == nil { + nonRoot := false + switch runAsUser := sc.RunAsUser(); { + case runAsUser == nil: + if s.scc.RunAsUser.Type == securityv1.RunAsUserStrategyMustRunAsNonRoot { + nonRoot = true + } + case *runAsUser > 0: + nonRoot = true + } + + if nonRoot { + sc.SetRunAsNonRoot(&nonRoot) + } + } + + caps, err := s.capabilitiesStrategy.Generate(pod, container) + if err != nil { + return nil, err + } + sc.SetCapabilities(caps) + + // if the SCC requires a read only root filesystem and the container has not made a specific + // request then default ReadOnlyRootFilesystem to true. + if s.scc.ReadOnlyRootFilesystem && sc.ReadOnlyRootFilesystem() == nil { + readOnlyRootFS := true + sc.SetReadOnlyRootFilesystem(&readOnlyRootFS) + } + + isPrivileged := sc.Privileged() != nil && *sc.Privileged() + addCapSysAdmin := false + if caps != nil { + for _, cap := range caps.Add { + if string(cap) == "CAP_SYS_ADMIN" { + addCapSysAdmin = true + break + } + } + } + + containerSeccomp, ok := pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+container.Name] + if ok { + sc.SetSeccompProfile(seccompFieldForAnnotation(containerSeccomp)) + } + + // if the SCC sets DefaultAllowPrivilegeEscalation and the container security context + // allowPrivilegeEscalation is not set, then default to that set by the SCC. 
+ // + // Exception: privileged pods and CAP_SYS_ADMIN capability + // + // This corresponds to Kube's pod validation: + // + // if sc.AllowPrivilegeEscalation != nil && !*sc.AllowPrivilegeEscalation { + // if sc.Privileged != nil && *sc.Privileged { + // allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `privileged` to true")) + // } + // + // if sc.Capabilities != nil { + // for _, cap := range sc.Capabilities.Add { + // if string(cap) == "CAP_SYS_ADMIN" { + // allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN")) + // } + // } + // } + // } + if s.scc.DefaultAllowPrivilegeEscalation != nil && sc.AllowPrivilegeEscalation() == nil && !isPrivileged && !addCapSysAdmin { + sc.SetAllowPrivilegeEscalation(s.scc.DefaultAllowPrivilegeEscalation) + } + + // if the SCC sets AllowPrivilegeEscalation to false set that as the default + if s.scc.AllowPrivilegeEscalation != nil && !*s.scc.AllowPrivilegeEscalation && sc.AllowPrivilegeEscalation() == nil { + sc.SetAllowPrivilegeEscalation(s.scc.AllowPrivilegeEscalation) + } + + return sc.ContainerSecurityContext(), nil +} + +// Ensure a pod's SecurityContext is in compliance with the given constraints. +func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + sc := securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext) + + fsGroups := []int64{} + if fsGroup := sc.FSGroup(); fsGroup != nil { + fsGroups = append(fsGroups, *fsGroup) + } + allErrs = append(allErrs, s.fsGroupStrategy.Validate(fldPath, pod, fsGroups)...) + allErrs = append(allErrs, s.supplementalGroupStrategy.Validate(fldPath, pod, sc.SupplementalGroups())...) + allErrs = append(allErrs, s.seccompStrategy.ValidatePod(pod)...) + + allErrs = append(allErrs, s.seLinuxStrategy.Validate(fldPath.Child("seLinuxOptions"), pod, nil, sc.SELinuxOptions())...) + + if !s.scc.AllowHostNetwork && sc.HostNetwork() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), sc.HostNetwork(), "Host network is not allowed to be used")) + } + + if !s.scc.AllowHostPID && sc.HostPID() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), sc.HostPID(), "Host PID is not allowed to be used")) + } + + if !s.scc.AllowHostIPC && sc.HostIPC() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), sc.HostIPC(), "Host IPC is not allowed to be used")) + } + + if s.scc.UserNamespaceLevel == securityv1.NamespaceLevelRequirePod && (sc.HostUsers() == nil || *sc.HostUsers()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostUsers"), sc.HostUsers(), "Host Users must be set to false")) + } + + allErrs = append(allErrs, s.sysctlsStrategy.Validate(pod)...) 
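+ // volumes: every pod volume's source type must be permitted by the SCC; sccutil.SCCAllowsAllVolumes short-circuits the check when the SCC allows all volume types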
+ + if len(pod.Spec.Volumes) > 0 && !sccutil.SCCAllowsAllVolumes(s.scc) { + allowedVolumes := sccutil.FSTypeToStringSetInternal(s.scc.Volumes) + for i, v := range pod.Spec.Volumes { + fsType, err := sccutil.GetVolumeFSType(v) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "volumes").Index(i), string(fsType), err.Error())) + continue + } + + if !allowsVolumeType(allowedVolumes, fsType, v.VolumeSource) { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec", "volumes").Index(i), string(fsType), + fmt.Sprintf("%s volumes are not allowed to be used", string(fsType)))) + } + } + } + + if len(pod.Spec.Volumes) > 0 && len(s.scc.AllowedFlexVolumes) > 0 && sccutil.SCCAllowsFSTypeInternal(s.scc, securityv1.FSTypeFlexVolume) { + for i, v := range pod.Spec.Volumes { + if v.FlexVolume == nil { + continue + } + + found := false + driver := v.FlexVolume.Driver + for _, allowedFlexVolume := range s.scc.AllowedFlexVolumes { + if driver == allowedFlexVolume.Driver { + found = true + break + } + } + if !found { + allErrs = append(allErrs, + field.Invalid(fldPath.Child("volumes").Index(i).Child("driver"), driver, + "Flexvolume driver is not allowed to be used")) + } + } + } + + return allErrs +} + +// Ensure a container's SecurityContext is in compliance with the given constraints +func (s *simpleProvider) ValidateContainerSecurityContext(pod *api.Pod, container *api.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + podSC := securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext) + sc := securitycontext.NewEffectiveContainerSecurityContextAccessor(podSC, securitycontext.NewContainerSecurityContextMutator(container.SecurityContext)) + + allErrs = append(allErrs, s.runAsUserStrategy.Validate(fldPath, pod, container, sc.RunAsNonRoot(), sc.RunAsUser())...) + allErrs = append(allErrs, s.seLinuxStrategy.Validate(fldPath.Child("seLinuxOptions"), pod, container, sc.SELinuxOptions())...) + allErrs = append(allErrs, s.seccompStrategy.ValidateContainer(pod, container)...) + + privileged := sc.Privileged() + if !s.scc.AllowPrivilegedContainer && privileged != nil && *privileged { + allErrs = append(allErrs, field.Invalid(fldPath.Child("privileged"), *privileged, "Privileged containers are not allowed")) + } + + allErrs = append(allErrs, s.capabilitiesStrategy.Validate(fldPath, pod, container, sc.Capabilities())...) + + if !s.scc.AllowHostNetwork && podSC.HostNetwork() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), podSC.HostNetwork(), "Host network is not allowed to be used")) + } + + if !s.scc.AllowHostPorts { + podhelpers.VisitContainersWithPath(&pod.Spec, fldPath, func(container *api.Container, path *field.Path) bool { + allErrs = append(allErrs, s.hasHostPort(container, path)...) 
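+ // visit every container so that each disallowed hostPort is reported individually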
+ return true + }) + } + + if !s.scc.AllowHostPID && podSC.HostPID() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), podSC.HostPID(), "Host PID is not allowed to be used")) + } + + if !s.scc.AllowHostIPC && podSC.HostIPC() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), podSC.HostIPC(), "Host IPC is not allowed to be used")) + } + + if s.scc.ReadOnlyRootFilesystem { + readOnly := sc.ReadOnlyRootFilesystem() + if readOnly == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), readOnly, "ReadOnlyRootFilesystem may not be nil and must be set to true")) + } else if !*readOnly { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), *readOnly, "ReadOnlyRootFilesystem must be set to true")) + } + } + + allowEscalation := sc.AllowPrivilegeEscalation() + if s.scc.AllowPrivilegeEscalation != nil && !*s.scc.AllowPrivilegeEscalation { + if allowEscalation == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("allowPrivilegeEscalation"), allowEscalation, "Allowing privilege escalation for containers is not allowed")) + } + + if allowEscalation != nil && *allowEscalation { + allErrs = append(allErrs, field.Invalid(fldPath.Child("allowPrivilegeEscalation"), *allowEscalation, "Allowing privilege escalation for containers is not allowed")) + } + } + + return allErrs +} + +// hasHostPort checks the port definitions on the container for HostPort > 0. +func (s *simpleProvider) hasHostPort(container *api.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, cp := range container.Ports { + if cp.HostPort > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPort"), cp.HostPort, "Host ports are not allowed to be used")) + } + } + return allErrs +} + +func (s *simpleProvider) GetSCC() *securityv1.SecurityContextConstraints { + return s.scc +} + +// Get the name of the SCC that this provider was initialized with. +func (s *simpleProvider) GetSCCName() string { + return s.scc.Name +} + +func (s *simpleProvider) GetSCCUsers() []string { + return s.scc.Users +} + +func (s *simpleProvider) GetSCCGroups() []string { + return s.scc.Groups +} + +// createUserStrategy creates a new user strategy. +func createUserStrategy(opts *securityv1.RunAsUserStrategyOptions) (user.RunAsUserSecurityContextConstraintsStrategy, error) { + switch opts.Type { + case securityv1.RunAsUserStrategyMustRunAs: + return user.NewMustRunAs(opts) + case securityv1.RunAsUserStrategyMustRunAsRange: + return user.NewMustRunAsRange(opts) + case securityv1.RunAsUserStrategyMustRunAsNonRoot: + return user.NewRunAsNonRoot(opts) + case securityv1.RunAsUserStrategyRunAsAny: + return user.NewRunAsAny(opts) + default: + return nil, fmt.Errorf("Unrecognized RunAsUser strategy type %s", opts.Type) + } +} + +// createSELinuxStrategy creates a new selinux strategy. 
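+// For MustRunAs, the options must already carry SELinuxOptions; CreateProviderFromConstraint fills in the level from the namespace's pre-allocated MCS annotation when the SCC leaves it unset.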
+func createSELinuxStrategy(opts *securityv1.SELinuxContextStrategyOptions) (selinux.SELinuxSecurityContextConstraintsStrategy, error) { + switch opts.Type { + case securityv1.SELinuxStrategyMustRunAs: + return selinux.NewMustRunAs(opts) + case securityv1.SELinuxStrategyRunAsAny: + return selinux.NewRunAsAny(opts) + default: + return nil, fmt.Errorf("Unrecognized SELinuxContext strategy type %s", opts.Type) + } +} + +// createFSGroupStrategy creates a new fsgroup strategy +func createFSGroupStrategy(opts *securityv1.FSGroupStrategyOptions) (group.GroupSecurityContextConstraintsStrategy, error) { + switch opts.Type { + case securityv1.FSGroupStrategyRunAsAny: + return group.NewRunAsAny() + case securityv1.FSGroupStrategyMustRunAs: + return group.NewMustRunAs(opts.Ranges, fsGroupField) + default: + return nil, fmt.Errorf("Unrecognized FSGroup strategy type %s", opts.Type) + } +} + +// createSupplementalGroupStrategy creates a new supplemental group strategy +func createSupplementalGroupStrategy(opts *securityv1.SupplementalGroupsStrategyOptions) (group.GroupSecurityContextConstraintsStrategy, error) { + switch opts.Type { + case securityv1.SupplementalGroupsStrategyRunAsAny: + return group.NewRunAsAny() + case securityv1.SupplementalGroupsStrategyMustRunAs: + return group.NewMustRunAs(opts.Ranges, supplementalGroupsField) + default: + return nil, fmt.Errorf("Unrecognized SupplementalGroups strategy type %s", opts.Type) + } +} + +// createCapabilitiesStrategy creates a new capabilities strategy. +func createCapabilitiesStrategy(defaultAddCaps, requiredDropCaps, allowedCaps []corev1.Capability) (capabilities.CapabilitiesSecurityContextConstraintsStrategy, error) { + return capabilities.NewDefaultCapabilities(defaultAddCaps, requiredDropCaps, allowedCaps) +} + +// createSeccompStrategy creates a new seccomp strategy +func createSeccompStrategy(allowedProfiles []string) (seccomp.SeccompStrategy, error) { + return seccomp.NewSeccompStrategy(allowedProfiles), nil +} + +// createSysctlsStrategy creates a new sysctls strategy +func createSysctlsStrategy(safeWhitelist, allowedUnsafeSysctls, forbiddenSysctls []string) (sysctl.SysctlsStrategy, error) { + return sysctl.NewMustMatchPatterns(safeWhitelist, allowedUnsafeSysctls, forbiddenSysctls), nil +} + +// allowsVolumeType determines whether the type and volume are valid +// given the volumes allowed by an scc. +// +// This function was derived from a psp function of the same name in +// pkg/security/podsecuritypolicy/provider.go and updated for scc +// compatibility. +func allowsVolumeType(allowedVolumes sets.String, fsType securityv1.FSType, volumeSource api.VolumeSource) bool { + if allowedVolumes.Has(string(fsType)) { + return true + } + + // If secret volumes are allowed by the scc, allow the projected + // volume sources that bound service account token volumes expose. + return allowedVolumes.Has(string(securityv1.FSTypeSecret)) && + fsType == securityv1.FSProjected && + sccutil.IsOnlyServiceAccountTokenSources(volumeSource.Projected) +} + +// seccompFieldForAnnotation takes a pod annotation and returns the converted +// seccomp profile field. +// SeccompAnnotations removal is planned for Kube 1.27, remove this logic afterwards +func seccompFieldForAnnotation(annotation string) *api.SeccompProfile { + // If only seccomp annotations are specified, copy the values into the + // corresponding fields. This ensures that existing applications continue + // to enforce seccomp, and prevents the kubelet from needing to resolve + // annotations & fields. 
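+ // the annotation-to-field mapping below is the inverse of seccompAnnotationForField's field-to-annotation mapping in the seccomp package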
+ if annotation == corev1.SeccompProfileNameUnconfined { + return &api.SeccompProfile{Type: api.SeccompProfileTypeUnconfined} + } + + if annotation == api.SeccompProfileRuntimeDefault || annotation == api.DeprecatedSeccompProfileDockerDefault { + return &api.SeccompProfile{Type: api.SeccompProfileTypeRuntimeDefault} + } + + if strings.HasPrefix(annotation, corev1.SeccompLocalhostProfileNamePrefix) { + localhostProfile := strings.TrimPrefix(annotation, corev1.SeccompLocalhostProfileNamePrefix) + if localhostProfile != "" { + return &api.SeccompProfile{ + Type: api.SeccompProfileTypeLocalhost, + LocalhostProfile: &localhostProfile, + } + } + } + + // we can only reach this code path if the localhostProfile name has a zero + // length or if the annotation has an unrecognized value + return nil +} + +// CopySS makes a shallow copy of a map. +func copySS(m map[string]string) map[string]string { + if m == nil { + return nil + } + copy := make(map[string]string, len(m)) + for k, v := range m { + copy[k] = v + } + return copy +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/types.go new file mode 100644 index 0000000000000..1c26ec2e863ae --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/types.go @@ -0,0 +1,28 @@ +package sccmatching + +import ( + securityv1 "github.com/openshift/api/security/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// SecurityContextConstraintsProvider provides the implementation to generate a new security +// context based on constraints or validate an existing security context against constraints. +type SecurityContextConstraintsProvider interface { + // Create a PodSecurityContext based on the given constraints. + CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, map[string]string, error) + // Create a container SecurityContext based on the given constraints + CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, error) + // Ensure a pod's SecurityContext is in compliance with the given constraints. + ValidatePodSecurityContext(pod *api.Pod, fldPath *field.Path) field.ErrorList + // Ensure a container's SecurityContext is in compliance with the given constraints + ValidateContainerSecurityContext(pod *api.Pod, container *api.Container, fldPath *field.Path) field.ErrorList + // Get the SCC that this provider was initialized with. + GetSCC() *securityv1.SecurityContextConstraints + // Get the name of the SCC that this provider was initialized with. 
+ GetSCCName() string + // Get the users associated with the SCC this provider was initialized with + GetSCCUsers() []string + // Get the groups associated with the SCC this provider was initialized with + GetSCCGroups() []string +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/seccomp/strategy.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/seccomp/strategy.go new file mode 100644 index 0000000000000..0d24eda6a668d --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/seccomp/strategy.go @@ -0,0 +1,197 @@ +package seccomp + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + allowAnyProfile = "*" +) + +// SeccompStrategy defines the interface for all seccomp constraint strategies. +type SeccompStrategy interface { + // Generate returns a profile based on constraint rules. + Generate(annotations map[string]string, pod *api.Pod) (string, error) + // ValidatePod ensures that the specified pod values fall within the range of the strategy. + ValidatePod(pod *api.Pod) field.ErrorList + // ValidateContainer ensures that the specified container values fall within the range of the strategy. + ValidateContainer(pod *api.Pod, container *api.Container) field.ErrorList +} + +type strategy struct { + allowedProfiles []string + // does the strategy allow any profile (wildcard) + allowAnyProfile bool + runtimeDefaultAllowed bool +} + +var _ SeccompStrategy = &strategy{} + +// NewSeccompStrategy creates a new strategy that enforces seccomp profile constraints. +func NewSeccompStrategy(allowedProfiles []string) SeccompStrategy { + allowAny := false + allowed := make([]string, 0, len(allowedProfiles)) + runtimeDefaultAllowed := false + + for _, p := range allowedProfiles { + if p == allowAnyProfile { + allowAny = true + continue + } + // With the graduation of seccomp to GA we automatically convert + // the deprecated seccomp profile `docker/default` to `runtime/default`. + // This means that we now have to automatically allow `runtime/default` + // if a user specifies `docker/default` and vice versa in an SCC. + if p == v1.DeprecatedSeccompProfileDockerDefault || p == v1.SeccompProfileRuntimeDefault { + runtimeDefaultAllowed = true + } + allowed = append(allowed, p) + } + + return &strategy{ + allowedProfiles: allowed, + allowAnyProfile: allowAny, + runtimeDefaultAllowed: runtimeDefaultAllowed, + } +} + +// Generate returns a profile based on constraint rules. +func (s *strategy) Generate(podAnnotations map[string]string, pod *api.Pod) (string, error) { + if podAnnotations[api.SeccompPodAnnotationKey] != "" { + // Profile already set, nothing to do. + return podAnnotations[api.SeccompPodAnnotationKey], nil + } + if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SeccompProfile != nil { + // Profile field already set, translate to annotation. + return seccompAnnotationForField(pod.Spec.SecurityContext.SeccompProfile), nil + } + + // return the first non-wildcard profile + if len(s.allowedProfiles) > 0 { + return s.allowedProfiles[0], nil + } + + return "", nil +} + +// ValidatePod ensures that the specified values on the pod fall within the range +// of the strategy.
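+// Both the legacy pod annotation and the securityContext.seccompProfile field are checked; the field value is translated to its annotation form before comparison.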
+func (s *strategy) ValidatePod(pod *api.Pod) field.ErrorList { + allErrs := field.ErrorList{} + podSpecFieldPath := field.NewPath("pod", "metadata", "annotations").Key(api.SeccompPodAnnotationKey) + podProfile := pod.Annotations[api.SeccompPodAnnotationKey] + // if the annotation is not set, see if the field is set and derive the corresponding annotation value + // We are keeping annotations for backward compatibility - in case the pod is + // running on an older node. + if len(podProfile) == 0 && pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SeccompProfile != nil { + podProfile = seccompAnnotationForField(pod.Spec.SecurityContext.SeccompProfile) + } + + if err := s.validateProfile(podSpecFieldPath, podProfile); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +// ValidateContainer ensures that the specified values on the container fall within +// the range of the strategy. +func (s *strategy) ValidateContainer(pod *api.Pod, container *api.Container) field.ErrorList { + allErrs := field.ErrorList{} + fieldPath := field.NewPath("pod", "metadata", "annotations").Key(api.SeccompContainerAnnotationKeyPrefix + container.Name) + containerProfile := profileForContainer(pod, container) + + if err := s.validateProfile(fieldPath, containerProfile); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +// validateProfile checks if profile is in allowedProfiles or if allowedProfiles +// contains the wildcard. +func (s *strategy) validateProfile(fldPath *field.Path, profile string) *field.Error { + if !s.allowAnyProfile && len(s.allowedProfiles) == 0 && profile != "" { + return field.Forbidden(fldPath, "seccomp may not be set") + } + + // for backwards compatibility and SCCs without a defined list of allowed profiles. + // If a SCC does not have allowedProfiles set then we should allow an empty profile. + // This will mean that the runtime default is used. + if len(s.allowedProfiles) == 0 && profile == "" { + return nil + } + + if s.allowAnyProfile { + return nil + } + + for _, p := range s.allowedProfiles { + if profile == p { + return nil + } + + // With the graduation of seccomp to GA we automatically convert + // the deprecated seccomp profile `docker/default` to `runtime/default`. + // This means that we now have to automatically allow `runtime/default` + // if a user specifies `docker/default` and vice versa in an SCC. + if s.runtimeDefaultAllowed && + (profile == v1.DeprecatedSeccompProfileDockerDefault || + profile == v1.SeccompProfileRuntimeDefault) { + return nil + } + } + + return field.Forbidden(fldPath, fmt.Sprintf("%s is not an allowed seccomp profile. Valid values are %v", profile, s.allowedProfiles)) +} + +// profileForContainer returns the container profile if set, otherwise the pod profile. 
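+// Precedence: container field, then container annotation, then pod field, then pod annotation.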
+func profileForContainer(pod *api.Pod, container *api.Container) string { + if container.SecurityContext != nil && container.SecurityContext.SeccompProfile != nil { + // derive the annotation value from the container field + return seccompAnnotationForField(container.SecurityContext.SeccompProfile) + } + containerProfile, ok := pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+container.Name] + if ok { + // return the existing container annotation + return containerProfile + } + if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SeccompProfile != nil { + // derive the annotation value from the pod field + return seccompAnnotationForField(pod.Spec.SecurityContext.SeccompProfile) + } + // return the existing pod annotation + return pod.Annotations[api.SeccompPodAnnotationKey] +} + +// seccompAnnotationForField takes a pod seccomp profile field and returns the +// converted annotation value. +// DEPRECATED: this is originally from k8s.io/kubernetes/pkg/api pod module which has +// been removed in upstream: https://github.com/kubernetes/kubernetes/pull/114947/files. +// TODO(auth team): remove once we stop handling the annotation. +func seccompAnnotationForField(field *api.SeccompProfile) string { + // If only seccomp fields are specified, add the corresponding annotations. + // This ensures that the fields are enforced even if the node version + // trails the API version + switch field.Type { + case api.SeccompProfileTypeUnconfined: + return v1.SeccompProfileNameUnconfined + + case api.SeccompProfileTypeRuntimeDefault: + return v1.SeccompProfileRuntimeDefault + + case api.SeccompProfileTypeLocalhost: + if field.LocalhostProfile != nil { + return v1.SeccompLocalhostProfileNamePrefix + *field.LocalhostProfile + } + } + + // we can only reach this code path if the LocalhostProfile is nil but the + // provided field type is SeccompProfileTypeLocalhost or if an unrecognized + // type is specified + return "" +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/convert.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/convert.go new file mode 100644 index 0000000000000..f11900ef22e2a --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/convert.go @@ -0,0 +1,24 @@ +package selinux + +import ( + corev1 "k8s.io/api/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + corev1conversions "k8s.io/kubernetes/pkg/apis/core/v1" +) + +func ToInternalSELinuxOptions(external *corev1.SELinuxOptions) (*coreapi.SELinuxOptions, error) { + if external == nil { + return nil, nil + } + internal := &coreapi.SELinuxOptions{} + err := corev1conversions.Convert_v1_SELinuxOptions_To_core_SELinuxOptions(external, internal, nil) + return internal, err +} + +func ToInternalSELinuxOptionsOrDie(external *corev1.SELinuxOptions) *coreapi.SELinuxOptions { + ret, err := ToInternalSELinuxOptions(external) + if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/doc.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/doc.go new file mode 100644 index 0000000000000..4ec01e969b00b --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/doc.go @@ -0,0 +1,2 @@ +// Package selinux contains security context constraints SELinux strategy implementations. 
+package selinux diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/mustrunas.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/mustrunas.go new file mode 100644 index 0000000000000..9e3b319b81d5a --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/mustrunas.go @@ -0,0 +1,105 @@ +package selinux + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + coreapi "k8s.io/kubernetes/pkg/apis/core" + + securityv1 "github.com/openshift/api/security/v1" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +type mustRunAs struct { + opts *securityv1.SELinuxContextStrategyOptions +} + +var _ SELinuxSecurityContextConstraintsStrategy = &mustRunAs{} + +func NewMustRunAs(options *securityv1.SELinuxContextStrategyOptions) (SELinuxSecurityContextConstraintsStrategy, error) { + if options == nil { + return nil, fmt.Errorf("MustRunAs requires SELinuxContextStrategyOptions") + } + if options.SELinuxOptions == nil { + return nil, fmt.Errorf("MustRunAs requires SELinuxOptions") + } + return &mustRunAs{ + opts: options, + }, nil +} + +// Generate creates the SELinuxOptions based on constraint rules. +func (s *mustRunAs) Generate(_ *coreapi.Pod, _ *coreapi.Container) (*coreapi.SELinuxOptions, error) { + return ToInternalSELinuxOptions(s.opts.SELinuxOptions) +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *mustRunAs) Validate(fldPath *field.Path, _ *coreapi.Pod, _ *coreapi.Container, seLinux *coreapi.SELinuxOptions) field.ErrorList { + allErrs := field.ErrorList{} + + if seLinux == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + return allErrs + } + if !equalLevels(s.opts.SELinuxOptions.Level, seLinux.Level) { + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.Level) + allErrs = append(allErrs, field.Invalid(fldPath.Child("level"), seLinux.Level, detail)) + } + if seLinux.Role != s.opts.SELinuxOptions.Role { + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.Role) + allErrs = append(allErrs, field.Invalid(fldPath.Child("role"), seLinux.Role, detail)) + } + if seLinux.Type != s.opts.SELinuxOptions.Type { + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.Type) + allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), seLinux.Type, detail)) + } + if seLinux.User != s.opts.SELinuxOptions.User { + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.User) + allErrs = append(allErrs, field.Invalid(fldPath.Child("user"), seLinux.User, detail)) + } + + return allErrs +} + +// equalLevels compares SELinux levels for equality. +func equalLevels(expected, actual string) bool { + if expected == actual { + return true + } + // "s0:c6,c0" => [ "s0", "c6,c0" ] + expectedParts := strings.SplitN(expected, ":", 2) + actualParts := strings.SplitN(actual, ":", 2) + + // both SELinux levels must be in a format "sX:cY" + if len(expectedParts) != 2 || len(actualParts) != 2 { + return false + } + + if !equalSensitivity(expectedParts[0], actualParts[0]) { + return false + } + + if !equalCategories(expectedParts[1], actualParts[1]) { + return false + } + + return true +} + +// equalSensitivity compares sensitivities of the SELinux levels for equality. +func equalSensitivity(expected, actual string) bool { + return expected == actual +} + +// equalCategories compares categories of the SELinux levels for equality. 
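
Before equalCategories, a standalone sketch of the complete level comparison (the logic of equalLevels is duplicated here so it runs on its own): sensitivities must match exactly, while category order is irrelevant.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// equalLevelsSketch mirrors equalLevels: split "sX:cY,cZ" into a
// sensitivity and a category list, then compare the categories as an
// order-insensitive set.
func equalLevelsSketch(expected, actual string) bool {
	if expected == actual {
		return true
	}
	e := strings.SplitN(expected, ":", 2)
	a := strings.SplitN(actual, ":", 2)
	if len(e) != 2 || len(a) != 2 {
		return false // both levels must look like "sX:cY"
	}
	if e[0] != a[0] {
		return false // sensitivities must match exactly
	}
	ec, ac := strings.Split(e[1], ","), strings.Split(a[1], ",")
	sort.Strings(ec)
	sort.Strings(ac)
	return strings.Join(ec, ",") == strings.Join(ac, ",")
}

func main() {
	fmt.Println(equalLevelsSketch("s0:c6,c0", "s0:c0,c6")) // true: same categories, different order
	fmt.Println(equalLevelsSketch("s0:c6,c0", "s1:c6,c0")) // false: different sensitivity
}
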
+func equalCategories(expected, actual string) bool {
+	expectedCategories := strings.Split(expected, ",")
+	actualCategories := strings.Split(actual, ",")
+
+	sort.Strings(expectedCategories)
+	sort.Strings(actualCategories)
+
+	return util.EqualStringSlices(expectedCategories, actualCategories)
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/runasany.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/runasany.go
new file mode 100644
index 0000000000000..95adb503628cb
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/runasany.go
@@ -0,0 +1,28 @@
+package selinux
+
+import (
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	coreapi "k8s.io/kubernetes/pkg/apis/core"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+// runAsAny implements the SELinuxSecurityContextConstraintsStrategy interface.
+type runAsAny struct{}
+
+var _ SELinuxSecurityContextConstraintsStrategy = &runAsAny{}
+
+// NewRunAsAny provides a strategy that will return the configured SELinux context or nil.
+func NewRunAsAny(options *securityv1.SELinuxContextStrategyOptions) (SELinuxSecurityContextConstraintsStrategy, error) {
+	return &runAsAny{}, nil
+}
+
+// Generate creates the SELinuxOptions based on constraint rules. RunAsAny returns nil.
+func (s *runAsAny) Generate(pod *coreapi.Pod, container *coreapi.Container) (*coreapi.SELinuxOptions, error) {
+	return nil, nil
+}
+
+// Validate ensures that the specified values fall within the range of the strategy.
+func (s *runAsAny) Validate(fldPath *field.Path, _ *coreapi.Pod, _ *coreapi.Container, options *coreapi.SELinuxOptions) field.ErrorList {
+	return field.ErrorList{}
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/types.go
new file mode 100644
index 0000000000000..4cd5f3e8f2b5a
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux/types.go
@@ -0,0 +1,14 @@
+package selinux
+
+import (
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	coreapi "k8s.io/kubernetes/pkg/apis/core"
+)
+
+// SELinuxSecurityContextConstraintsStrategy defines the interface for all SELinux constraint strategies.
+type SELinuxSecurityContextConstraintsStrategy interface {
+	// Generate creates the SELinuxOptions based on constraint rules.
+	Generate(pod *coreapi.Pod, container *coreapi.Container) (*coreapi.SELinuxOptions, error)
+	// Validate ensures that the specified values fall within the range of the strategy.
+	Validate(fldPath *field.Path, pod *coreapi.Pod, container *coreapi.Container, options *coreapi.SELinuxOptions) field.ErrorList
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl/mustmatchpatterns.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl/mustmatchpatterns.go
new file mode 100644
index 0000000000000..4a67b043c50d6
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl/mustmatchpatterns.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sysctl + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// SafeSysctlAllowlist returns the allowlist of safe sysctls and safe sysctl patterns (ending in *). +// +// A sysctl is called safe iff +// - it is namespaced in the container or the pod +// - it is isolated, i.e. has no influence on any other pod on the same node. +func SafeSysctlAllowlist() []string { + return []string{ + "kernel.shm_rmid_forced", + "net.ipv4.ip_local_port_range", + "net.ipv4.tcp_syncookies", + "net.ipv4.ping_group_range", + "net.ipv4.ip_unprivileged_port_start", + "net.ipv4.tcp_keepalive_time", + "net.ipv4.tcp_fin_timeout", + "net.ipv4.tcp_keepalive_intvl", + "net.ipv4.tcp_keepalive_probes", + } +} + +// mustMatchPatterns implements the SysctlsStrategy interface +type mustMatchPatterns struct { + safeAllowlist []string + allowedUnsafeSysctls []string + forbiddenSysctls []string +} + +var ( + _ SysctlsStrategy = &mustMatchPatterns{} +) + +// NewMustMatchPatterns creates a new mustMatchPatterns strategy that will provide validation. +// Passing nil means the default pattern, passing an empty list means to disallow all sysctls. +func NewMustMatchPatterns(safeAllowlist, allowedUnsafeSysctls, forbiddenSysctls []string) SysctlsStrategy { + return &mustMatchPatterns{ + safeAllowlist: safeAllowlist, + allowedUnsafeSysctls: allowedUnsafeSysctls, + forbiddenSysctls: forbiddenSysctls, + } +} + +func (s *mustMatchPatterns) isForbidden(sysctlName string) bool { + // Is the sysctl forbidden? + for _, s := range s.forbiddenSysctls { + if strings.HasSuffix(s, "*") { + prefix := strings.TrimSuffix(s, "*") + if strings.HasPrefix(sysctlName, prefix) { + return true + } + } else if sysctlName == s { + return true + } + } + return false +} + +func (s *mustMatchPatterns) isSafe(sysctlName string) bool { + for _, ws := range s.safeAllowlist { + if sysctlName == ws { + return true + } + } + return false +} + +func (s *mustMatchPatterns) isAllowedUnsafe(sysctlName string) bool { + for _, s := range s.allowedUnsafeSysctls { + if strings.HasSuffix(s, "*") { + prefix := strings.TrimSuffix(s, "*") + if strings.HasPrefix(sysctlName, prefix) { + return true + } + } else if sysctlName == s { + return true + } + } + return false +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *mustMatchPatterns) Validate(pod *api.Pod) field.ErrorList { + allErrs := field.ErrorList{} + + var sysctls []api.Sysctl + if pod.Spec.SecurityContext != nil { + sysctls = pod.Spec.SecurityContext.Sysctls + } + + fieldPath := field.NewPath("pod", "spec", "securityContext").Child("sysctls") + + for i, sysctl := range sysctls { + switch { + case s.isForbidden(sysctl.Name): + allErrs = append(allErrs, field.ErrorList{field.Forbidden(fieldPath.Index(i), fmt.Sprintf("sysctl %q is not allowed", sysctl.Name))}...) 
+ case s.isSafe(sysctl.Name): + continue + case s.isAllowedUnsafe(sysctl.Name): + continue + default: + allErrs = append(allErrs, field.ErrorList{field.Forbidden(fieldPath.Index(i), fmt.Sprintf("unsafe sysctl %q is not allowed", sysctl.Name))}...) + } + } + + return allErrs +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl/types.go new file mode 100644 index 0000000000000..a6c2034a8d41d --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl/types.go @@ -0,0 +1,28 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sysctl + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// SysctlsStrategy defines the interface for all sysctl strategies. +type SysctlsStrategy interface { + // Validate ensures that the specified values fall within the range of the strategy. + Validate(pod *api.Pod) field.ErrorList +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/doc.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/doc.go new file mode 100644 index 0000000000000..f28032767eba0 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/doc.go @@ -0,0 +1,2 @@ +// Package user contains security context constraints user strategy implementations. +package user diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/mustrunas.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/mustrunas.go new file mode 100644 index 0000000000000..455d7f8def1b2 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/mustrunas.go @@ -0,0 +1,53 @@ +package user + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" + + securityv1 "github.com/openshift/api/security/v1" +) + +// mustRunAs implements the RunAsUserSecurityContextConstraintsStrategy interface +type mustRunAs struct { + opts *securityv1.RunAsUserStrategyOptions +} + +var _ RunAsUserSecurityContextConstraintsStrategy = &mustRunAs{} + +// NewMustRunAs provides a strategy that requires the container to run as a specific UID. +func NewMustRunAs(options *securityv1.RunAsUserStrategyOptions) (RunAsUserSecurityContextConstraintsStrategy, error) { + if options == nil { + return nil, fmt.Errorf("MustRunAs requires run as user options") + } + if options.UID == nil { + return nil, fmt.Errorf("MustRunAs requires a UID") + } + return &mustRunAs{ + opts: options, + }, nil +} + +// Generate creates the uid based on policy rules. MustRunAs returns the UID it is initialized with. 
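
Before the Generate and Validate implementations, a hypothetical wiring of this strategy. The import paths are the vendored ones shown in this diff; the main wrapper and variable names are illustrative, and running it assumes the module resolves these packages.

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
	"github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	uid := int64(1000)
	strategy, err := user.NewMustRunAs(&securityv1.RunAsUserStrategyOptions{UID: &uid})
	if err != nil {
		panic(err)
	}

	// MustRunAs always generates the UID it was configured with.
	generated, _ := strategy.Generate(nil, nil)
	fmt.Println(*generated) // 1000

	// Any other UID fails validation.
	wrong := int64(0)
	errs := strategy.Validate(field.NewPath("spec"), nil, nil, nil, &wrong)
	fmt.Println(len(errs) > 0) // true: 0 != 1000
}
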
+func (s *mustRunAs) Generate(pod *api.Pod, container *api.Container) (*int64, error) { + return s.opts.UID, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList { + allErrs := field.ErrorList{} + + if runAsUser == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("runAsUser"), "")) + return allErrs + } + + if *s.opts.UID != *runAsUser { + detail := fmt.Sprintf("must be: %v", *s.opts.UID) + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *runAsUser, detail)) + return allErrs + } + + return allErrs +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/mustrunasrange.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/mustrunasrange.go new file mode 100644 index 0000000000000..1ca709e2a43a8 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/mustrunasrange.go @@ -0,0 +1,56 @@ +package user + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/kubernetes/pkg/apis/core" + + securityv1 "github.com/openshift/api/security/v1" +) + +// mustRunAsRange implements the RunAsUserSecurityContextConstraintsStrategy interface +type mustRunAsRange struct { + opts *securityv1.RunAsUserStrategyOptions +} + +var _ RunAsUserSecurityContextConstraintsStrategy = &mustRunAsRange{} + +// NewMustRunAsRange provides a strategy that requires the container to run as a specific UID in a range. +func NewMustRunAsRange(options *securityv1.RunAsUserStrategyOptions) (RunAsUserSecurityContextConstraintsStrategy, error) { + if options == nil { + return nil, fmt.Errorf("MustRunAsRange requires run as user options") + } + if options.UIDRangeMin == nil { + return nil, fmt.Errorf("MustRunAsRange requires a UIDRangeMin") + } + if options.UIDRangeMax == nil { + return nil, fmt.Errorf("MustRunAsRange requires a UIDRangeMax") + } + return &mustRunAsRange{ + opts: options, + }, nil +} + +// Generate creates the uid based on policy rules. MustRunAs returns the UIDRangeMin it is initialized with. +func (s *mustRunAsRange) Generate(pod *api.Pod, container *api.Container) (*int64, error) { + return s.opts.UIDRangeMin, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. 
+func (s *mustRunAsRange) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if runAsUser == nil {
+		allErrs = append(allErrs, field.Required(fldPath.Child("runAsUser"), ""))
+		return allErrs
+	}
+
+	if *runAsUser < *s.opts.UIDRangeMin || *runAsUser > *s.opts.UIDRangeMax {
+		detail := fmt.Sprintf("must be in the range: [%v, %v]", *s.opts.UIDRangeMin, *s.opts.UIDRangeMax)
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *runAsUser, detail))
+		return allErrs
+	}
+
+	return allErrs
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/nonroot.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/nonroot.go
new file mode 100644
index 0000000000000..52e27a3d83601
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/nonroot.go
@@ -0,0 +1,43 @@
+package user
+
+import (
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	api "k8s.io/kubernetes/pkg/apis/core"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+type nonRoot struct{}
+
+var _ RunAsUserSecurityContextConstraintsStrategy = &nonRoot{}
+
+func NewRunAsNonRoot(options *securityv1.RunAsUserStrategyOptions) (RunAsUserSecurityContextConstraintsStrategy, error) {
+	return &nonRoot{}, nil
+}
+
+// Generate creates the uid based on policy rules. This strategy does not generate a UID;
+// it assumes that the user will specify a UID or that the container image specifies one.
+func (s *nonRoot) Generate(pod *api.Pod, container *api.Container) (*int64, error) {
+	return nil, nil
+}
+
+// Validate ensures that the specified values fall within the range of the strategy. Validation
+// passes if either the UID is not set, assuming that the image will provide the UID, or if
+// the UID is set and is not root. For this to work properly it assumes that the kubelet
+// enforces the runAsNonRoot check against the image's user at container start when no
+// explicit UID is provided.
+func (s *nonRoot) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if runAsNonRoot == nil && runAsUser == nil {
+		allErrs = append(allErrs, field.Required(fldPath.Child("runAsNonRoot"), "must be true"))
+		return allErrs
+	}
+	if runAsNonRoot != nil && !*runAsNonRoot {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsNonRoot"), *runAsNonRoot, "must be true"))
+		return allErrs
+	}
+	if runAsUser != nil && *runAsUser == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *runAsUser, "running with the root UID is forbidden"))
+		return allErrs
+	}
+	return allErrs
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/runasany.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/runasany.go
new file mode 100644
index 0000000000000..f56505ed1d4cc
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/runasany.go
@@ -0,0 +1,28 @@
+package user
+
+import (
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	api "k8s.io/kubernetes/pkg/apis/core"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+// runAsAny implements the RunAsUserSecurityContextConstraintsStrategy interface.
+type runAsAny struct{}
+
+var _ RunAsUserSecurityContextConstraintsStrategy = &runAsAny{}
+
+// NewRunAsAny provides a strategy that will return nil.
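
Stepping back to the nonRoot strategy above: a standalone sketch of its three rejection cases, with the logic duplicated so it runs on its own (the function name validateNonRoot is illustrative). The run-as-any constructor follows the sketch.

package main

import "fmt"

// validateNonRoot duplicates the nonRoot.Validate decision table:
// at least one of runAsNonRoot/runAsUser must be set, runAsNonRoot
// must not be false, and UID 0 is rejected.
func validateNonRoot(runAsNonRoot *bool, runAsUser *int64) string {
	switch {
	case runAsNonRoot == nil && runAsUser == nil:
		return "required: runAsNonRoot must be true"
	case runAsNonRoot != nil && !*runAsNonRoot:
		return "invalid: runAsNonRoot must be true"
	case runAsUser != nil && *runAsUser == 0:
		return "invalid: running with the root UID is forbidden"
	default:
		return "ok"
	}
}

func main() {
	root := int64(0)
	nonRoot := true
	fmt.Println(validateNonRoot(nil, nil))      // required: runAsNonRoot must be true
	fmt.Println(validateNonRoot(&nonRoot, nil)) // ok: the kubelet checks the image user at start
	fmt.Println(validateNonRoot(nil, &root))    // invalid: running with the root UID is forbidden
}
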
+func NewRunAsAny(options *securityv1.RunAsUserStrategyOptions) (RunAsUserSecurityContextConstraintsStrategy, error) {
+	return &runAsAny{}, nil
+}
+
+// Generate creates the uid based on policy rules.
+func (s *runAsAny) Generate(pod *api.Pod, container *api.Container) (*int64, error) {
+	return nil, nil
+}
+
+// Validate ensures that the specified values fall within the range of the strategy.
+func (s *runAsAny) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList {
+	return field.ErrorList{}
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/types.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/types.go
new file mode 100644
index 0000000000000..fd60d45fd1364
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user/types.go
@@ -0,0 +1,14 @@
+package user
+
+import (
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	api "k8s.io/kubernetes/pkg/apis/core"
+)
+
+// RunAsUserSecurityContextConstraintsStrategy defines the interface for all uid constraint strategies.
+type RunAsUserSecurityContextConstraintsStrategy interface {
+	// Generate creates the uid based on policy rules.
+	Generate(pod *api.Pod, container *api.Container) (*int64, error)
+	// Validate ensures that the specified values fall within the range of the strategy.
+	Validate(fldPath *field.Path, pod *api.Pod, container *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort/bypriority.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort/bypriority.go
new file mode 100644
index 0000000000000..f0f1f9b7c38c0
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort/bypriority.go
@@ -0,0 +1,58 @@
+package sort
+
+import (
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+// ByPriority is a helper to sort SCCs based on priority, highest first. Equal priorities
+// are broken by restriction score (most restrictive first) and then by a string compare
+// of the name.
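
Before the implementation, a hypothetical usage sketch. The import paths are the vendored ones from this diff; the sccsort alias, the example SCCs, and the expected ordering comments are illustrative (empty strategy types score zero and only emit klog warnings).

package main

import (
	"fmt"
	"sort"

	securityv1 "github.com/openshift/api/security/v1"
	sccsort "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ten := int32(10)
	sccs := []*securityv1.SecurityContextConstraints{
		{ObjectMeta: metav1.ObjectMeta{Name: "restricted"}}, // nil priority counts as 0
		{ObjectMeta: metav1.ObjectMeta{Name: "privileged"}, Priority: &ten, AllowPrivilegedContainer: true},
		{ObjectMeta: metav1.ObjectMeta{Name: "anyuid"}, Priority: &ten},
	}

	// Highest priority first; ties broken by restriction score, then name.
	sort.Sort(sccsort.ByPriority(sccs))
	for _, scc := range sccs {
		fmt.Println(scc.Name)
	}
	// Expected order: anyuid (priority 10, lower score), privileged
	// (priority 10, but allows privileged containers), restricted (priority 0).
}
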
+type ByPriority []*securityv1.SecurityContextConstraints + +func (s ByPriority) Len() int { + return len(s) +} +func (s ByPriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByPriority) Less(i, j int) bool { + ret, _ := s.LessWithReason(i, j) + return ret +} +func (s ByPriority) LessWithReason(i, j int) (bool, string) { + iSCC := s[i] + jSCC := s[j] + + iSCCPriority := getPriority(iSCC) + jSCCPriority := getPriority(jSCC) + + // a higher priority is considered "less" so that it moves to the front of the line + if iSCCPriority > jSCCPriority { + return true, "has higher priority" + } + + if iSCCPriority < jSCCPriority { + return false, "has lower priority" + } + + // priorities are equal, let's try point values + iRestrictionScore := pointValue(iSCC) + jRestrictionScore := pointValue(jSCC) + + // a lower restriction score is considered "less" so that it moves to the front of the line + // (the greater the score, the more lax the SCC is) + if iRestrictionScore < jRestrictionScore { + return true, moreRestrictiveReason(iRestrictionScore, jRestrictionScore) + } + + if iRestrictionScore > jRestrictionScore { + return false, moreRestrictiveReason(jRestrictionScore, iRestrictionScore) + } + + // they are still equal, sort by name + return iSCC.Name < jSCC.Name, "ordered by name" +} + +func getPriority(scc *securityv1.SecurityContextConstraints) int { + if scc.Priority == nil { + return 0 + } + return int(*scc.Priority) +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort/byrestrictions.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort/byrestrictions.go new file mode 100644 index 0000000000000..a55f979a57436 --- /dev/null +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort/byrestrictions.go @@ -0,0 +1,241 @@ +package sort + +import ( + "fmt" + "strings" + + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + + securityv1 "github.com/openshift/api/security/v1" +) + +// ByRestrictions is a helper to sort SCCs in order of most restrictive to least restrictive. +type ByRestrictions []*securityv1.SecurityContextConstraints + +func (s ByRestrictions) Len() int { + return len(s) +} +func (s ByRestrictions) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByRestrictions) Less(i, j int) bool { + return pointValue(s[i]) < pointValue(s[j]) +} + +// The following constants define the weight of the restrictions and used for +// calculating the points of the particular SCC. The lower the number, the more +// restrictive SCC is. Make sure that weak restrictions are always valued +// higher than the combination of the strong restrictions. + +// To be able to reason about what restriction was favored to be more restrictive +// ensure that number ranges between distinct restrictions are mutually exclusive. + +type points int + +const ( + // max total 3_189_999 = 1_600_000 + 1_589_999 + privilegedPoints points = 1_600_000 + + // max total: 1_589_999 = 800_000 + 789_999 + hostPortsPoints points = 800_000 + + // max total: 789_999 = 400_000 + 389_999 + hostNetworkPoints points = 400_000 + + // max total: 389_999 = 200_000 + 189_999 + hostVolumePoints points = 200_000 + + // max total 189_999 = 100_000 + 89_999 + nonTrivialVolumePoints points = 100_000 + + // Note: boundaries for runAs* must be considered twice, + // because they are accumulated for both SELinuxContext.Type + // and RunAsUser.Type. 
+	//
+	// max total 89_999 = (40_000 * 2) + 9999
+	runAsAnyUserPoints points = 40_000
+	runAsNonRootPoints points = 30_000
+	runAsRangePoints   points = 20_000
+	runAsUserPoints    points = 10_000
+
+	// cap* max points = 9999
+	capDefaultPoints  points = 5000
+	capAddOnePoints   points = 300
+	capAllowAllPoints points = 4000
+	capAllowOnePoints points = 10
+	capDropAllPoints  points = -3000
+	capDropOnePoints  points = -50
+	capMaxPoints      points = 9999
+	capMinPoints      points = 0
+
+	noPoints points = 0
+)
+
+func moreRestrictiveReason(p, q points) string {
+	if p >= q {
+		return ""
+	}
+
+	var done bool
+	var reason string
+	dueTo := func(x points, what string) (points, points, string, bool) {
+		switch {
+		case p >= x && q >= x:
+			p -= x
+			q -= x
+		case p < x && q >= x:
+			return p, q, fmt.Sprintf("forbids %s", what), true
+		}
+		return p, q, "", false
+	}
+	if p, q, reason, done = dueTo(privilegedPoints, "privileged"); done {
+		return reason
+	}
+	if p, q, reason, done = dueTo(hostPortsPoints, "host ports"); done {
+		return reason
+	}
+	if p, q, reason, done = dueTo(hostNetworkPoints, "host networking"); done {
+		return reason
+	}
+	if p, q, reason, done = dueTo(hostVolumePoints, "host volume mounts"); done {
+		return reason
+	}
+	if p, q, reason, done = dueTo(nonTrivialVolumePoints, "non-trivial volume mounts"); done {
+		return reason
+	}
+
+	runsAsP, capP := p/10000, p%10000
+	runsAsQ, capQ := q/10000, q%10000
+
+	if runsAsP < runsAsQ {
+		// this can be either SELinuxContext.Type or RunAsUser.Type
+		return "permits fewer runAs strategies"
+	}
+	if capP < capQ {
+		return "permits fewer capabilities"
+	}
+
+	// this should never happen due to the comparison at the very top
+	return "is equally restrictive"
+}
+
+// pointValue places a value on the SCC based on the settings of the SCC that can be used
+// to determine how restrictive it is. The lower the number, the more restrictive it is.
+func pointValue(constraint *securityv1.SecurityContextConstraints) points {
+	totalPoints := noPoints
+
+	if constraint.AllowPrivilegedContainer {
+		totalPoints += privilegedPoints
+	}
+
+	// add points based on volume requests
+	totalPoints += volumePointValue(constraint)
+
+	if constraint.AllowHostNetwork {
+		totalPoints += hostNetworkPoints
+	}
+	if constraint.AllowHostPorts {
+		totalPoints += hostPortsPoints
+	}
+
+	// add points based on capabilities
+	totalPoints += capabilitiesPointValue(constraint)
+
+	// the map contains points for both RunAsUser and SELinuxContext strategies
+	// by taking advantage of the fact that they have identical strategy names
+	strategiesPoints := map[string]points{
+		string(securityv1.RunAsUserStrategyRunAsAny):         runAsAnyUserPoints,
+		string(securityv1.RunAsUserStrategyMustRunAsNonRoot): runAsNonRootPoints,
+		string(securityv1.RunAsUserStrategyMustRunAsRange):   runAsRangePoints,
+		string(securityv1.RunAsUserStrategyMustRunAs):        runAsUserPoints,
+	}
+
+	strategyType := string(constraint.SELinuxContext.Type)
+	points, found := strategiesPoints[strategyType]
+	if found {
+		totalPoints += points
+	} else {
+		klog.Warningf("SELinuxContext type %q has no point value, this may cause issues in sorting SCCs by restriction", strategyType)
+	}
+
+	strategyType = string(constraint.RunAsUser.Type)
+	points, found = strategiesPoints[strategyType]
+	if found {
+		totalPoints += points
+	} else {
+		klog.Warningf("RunAsUser type %q has no point value, this may cause issues in sorting SCCs by restriction", strategyType)
+	}
+
+	return totalPoints
+}
+
+// volumePointValue returns a score based on the volumes allowed by the SCC.
+// Allowing a host volume will return a score of 200_000. Allowance of anything other
+// than Secret, ConfigMap, EmptyDir, DownwardAPI, Projected, and None will result in
+// a score of 100_000. If the SCC only allows these trivial types, it will have a
+// score of 0.
+func volumePointValue(scc *securityv1.SecurityContextConstraints) points {
+	hasHostVolume := false
+	hasNonTrivialVolume := false
+	for _, v := range scc.Volumes {
+		switch v {
+		case securityv1.FSTypeHostPath, securityv1.FSTypeAll:
+			hasHostVolume = true
+			// host volumes already yield the maximum volume score;
+			// nothing further to track for this volume
+		// it is easier to specifically list the trivial volumes and allow the
+		// default case to be non-trivial so we don't have to worry about adding
+		// volumes in the future unless they're trivial.
+		case securityv1.FSTypeSecret, securityv1.FSTypeConfigMap, securityv1.FSTypeEmptyDir,
+			securityv1.FSTypeDownwardAPI, securityv1.FSProjected, securityv1.FSTypeNone:
+			// do nothing
+		default:
+			hasNonTrivialVolume = true
+		}
+	}
+
+	if hasHostVolume {
+		return hostVolumePoints
+	}
+	if hasNonTrivialVolume {
+		return nonTrivialVolumePoints
+	}
+	return noPoints
+}
+
+// hasCap checks for needle in haystack.
+func hasCap(needle string, haystack []corev1.Capability) bool {
+	for _, c := range haystack {
+		if needle == strings.ToUpper(string(c)) {
+			return true
+		}
+	}
+	return false
+}
+
+// capabilitiesPointValue returns a score based on the capabilities allowed,
+// added, or removed by the SCC. This allows us to prefer the more restrictive
+// SCC. It never returns a score higher than capMaxPoints or lower than capMinPoints.
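
Before capabilitiesPointValue, a small standalone sketch of how the banded weights decompose, mirroring the p/10000 and p%10000 split used by moreRestrictiveReason once the larger host and privilege bands have been subtracted. The helper name decompose is illustrative.

package main

import "fmt"

// decompose splits a residual SCC score into the run-as band and the
// capabilities band: run-as points are multiples of 10_000, while all
// cap* points stay below 10_000, so integer division separates them.
func decompose(score int) (runAs, caps int) {
	return score / 10_000, score % 10_000
}

func main() {
	// e.g. runAsAny for both SELinuxContext and RunAsUser (40_000 * 2)
	// plus the default capabilities score of 5000:
	runAs, caps := decompose(40_000*2 + 5000)
	fmt.Println(runAs, caps) // 8 5000
}
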
+func capabilitiesPointValue(scc *securityv1.SecurityContextConstraints) points {
+	capsPoints := capDefaultPoints
+	capsPoints += capAddOnePoints * points(len(scc.DefaultAddCapabilities))
+	if hasCap(string(securityv1.AllowAllCapabilities), scc.AllowedCapabilities) ||
+		hasCap("ALL", scc.AllowedCapabilities) {
+		capsPoints += capAllowAllPoints
+	} else {
+		capsPoints += capAllowOnePoints * points(len(scc.AllowedCapabilities))
+	}
+	if hasCap("ALL", scc.RequiredDropCapabilities) {
+		capsPoints += capDropAllPoints
+	} else {
+		capsPoints += capDropOnePoints * points(len(scc.RequiredDropCapabilities))
+	}
+	if capsPoints > capMaxPoints {
+		return capMaxPoints
+	} else if capsPoints < capMinPoints {
+		return capMinPoints
+	}
+	return capsPoints
+}
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go
new file mode 100644
index 0000000000000..dfca9ccac787f
--- /dev/null
+++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go
@@ -0,0 +1,246 @@
+package util
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	api "k8s.io/kubernetes/pkg/apis/core"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+func GetAllFSTypesExcept(exceptions ...string) sets.String {
+	fstypes := GetAllFSTypesAsSet()
+	for _, e := range exceptions {
+		fstypes.Delete(e)
+	}
+	return fstypes
+}
+
+func GetAllFSTypesAsSet() sets.String {
+	fstypes := sets.NewString()
+	fstypes.Insert(
+		string(securityv1.FSTypeHostPath),
+		string(securityv1.FSTypeAzureFile),
+		string(securityv1.FSTypeFlocker),
+		string(securityv1.FSTypeFlexVolume),
+		string(securityv1.FSTypeEmptyDir),
+		string(securityv1.FSTypeGCEPersistentDisk),
+		string(securityv1.FSTypeAWSElasticBlockStore),
+		string(securityv1.FSTypeGitRepo),
+		string(securityv1.FSTypeSecret),
+		string(securityv1.FSTypeNFS),
+		string(securityv1.FSTypeISCSI),
+		string(securityv1.FSTypeGlusterfs),
+		string(securityv1.FSTypePersistentVolumeClaim),
+		string(securityv1.FSTypeRBD),
+		string(securityv1.FSTypeCinder),
+		string(securityv1.FSTypeCephFS),
+		string(securityv1.FSTypeDownwardAPI),
+		string(securityv1.FSTypeFC),
+		string(securityv1.FSTypeConfigMap),
+		string(securityv1.FSTypeVsphereVolume),
+		string(securityv1.FSTypeQuobyte),
+		string(securityv1.FSTypeAzureDisk),
+		string(securityv1.FSTypePhotonPersistentDisk),
+		string(securityv1.FSProjected),
+		string(securityv1.FSPortworxVolume),
+		string(securityv1.FSScaleIO),
+		string(securityv1.FSStorageOS),
+		string(securityv1.FSTypeCSI),
+		string(securityv1.FSTypeEphemeral),
+		string(securityv1.FSTypeImage),
+	)
+	return fstypes
+}
+
+// GetVolumeFSType gets the FSType for a volume.
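
Before GetVolumeFSType, a hypothetical usage sketch for the set helpers above. The import paths are the vendored ones from this diff; the main wrapper is illustrative, and running it assumes the module resolves these packages.

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
	"github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util"
)

func main() {
	// Every FSType except host paths, e.g. when building a restricted SCC.
	allowed := util.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))
	fmt.Println(allowed.Has(string(securityv1.FSTypeHostPath))) // false: excluded
	fmt.Println(allowed.Has(string(securityv1.FSTypeSecret)))   // true: still in the set
}
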
+func GetVolumeFSType(v api.Volume) (securityv1.FSType, error) { + switch { + case v.HostPath != nil: + return securityv1.FSTypeHostPath, nil + case v.EmptyDir != nil: + return securityv1.FSTypeEmptyDir, nil + case v.GCEPersistentDisk != nil: + return securityv1.FSTypeGCEPersistentDisk, nil + case v.AWSElasticBlockStore != nil: + return securityv1.FSTypeAWSElasticBlockStore, nil + case v.GitRepo != nil: + return securityv1.FSTypeGitRepo, nil + case v.Secret != nil: + return securityv1.FSTypeSecret, nil + case v.NFS != nil: + return securityv1.FSTypeNFS, nil + case v.ISCSI != nil: + return securityv1.FSTypeISCSI, nil + case v.Glusterfs != nil: + return securityv1.FSTypeGlusterfs, nil + case v.PersistentVolumeClaim != nil: + return securityv1.FSTypePersistentVolumeClaim, nil + case v.RBD != nil: + return securityv1.FSTypeRBD, nil + case v.FlexVolume != nil: + return securityv1.FSTypeFlexVolume, nil + case v.Cinder != nil: + return securityv1.FSTypeCinder, nil + case v.CephFS != nil: + return securityv1.FSTypeCephFS, nil + case v.Flocker != nil: + return securityv1.FSTypeFlocker, nil + case v.DownwardAPI != nil: + return securityv1.FSTypeDownwardAPI, nil + case v.FC != nil: + return securityv1.FSTypeFC, nil + case v.AzureFile != nil: + return securityv1.FSTypeAzureFile, nil + case v.ConfigMap != nil: + return securityv1.FSTypeConfigMap, nil + case v.VsphereVolume != nil: + return securityv1.FSTypeVsphereVolume, nil + case v.Quobyte != nil: + return securityv1.FSTypeQuobyte, nil + case v.AzureDisk != nil: + return securityv1.FSTypeAzureDisk, nil + case v.PhotonPersistentDisk != nil: + return securityv1.FSTypePhotonPersistentDisk, nil + case v.Projected != nil: + return securityv1.FSProjected, nil + case v.PortworxVolume != nil: + return securityv1.FSPortworxVolume, nil + case v.ScaleIO != nil: + return securityv1.FSScaleIO, nil + case v.StorageOS != nil: + return securityv1.FSStorageOS, nil + case v.CSI != nil: + return securityv1.FSTypeCSI, nil + case v.Ephemeral != nil: + return securityv1.FSTypeEphemeral, nil + case v.Image != nil: + return securityv1.FSTypeImage, nil + } + + return "", fmt.Errorf("unknown volume type for volume: %#v", v) +} + +// fsTypeToStringSet converts an FSType slice to a string set. +func FSTypeToStringSetInternal(fsTypes []securityv1.FSType) sets.String { + set := sets.NewString() + for _, v := range fsTypes { + set.Insert(string(v)) + } + return set +} + +// SCCAllowsAllVolumes checks for FSTypeAll in the scc's allowed volumes. +func SCCAllowsAllVolumes(scc *securityv1.SecurityContextConstraints) bool { + return SCCAllowsFSTypeInternal(scc, securityv1.FSTypeAll) +} + +// SCCAllowsFSTypeInternal is a utility for checking if an SCC allows a particular FSType. +// If all volumes are allowed then this will return true for any FSType passed. +func SCCAllowsFSTypeInternal(scc *securityv1.SecurityContextConstraints, fsType securityv1.FSType) bool { + if scc == nil { + return false + } + + for _, v := range scc.Volumes { + if v == fsType || v == securityv1.FSTypeAll { + return true + } + } + return false +} + +// SCCAllowsFSType is a utility for checking if an SCC allows a particular FSType. +// If all volumes are allowed then this will return true for any FSType passed. 
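
For example, a hypothetical caller of the function defined below (import paths are the vendored ones from this diff; the example SCCs are illustrative):

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
	"github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util"
)

func main() {
	scc := &securityv1.SecurityContextConstraints{
		Volumes: []securityv1.FSType{securityv1.FSTypeConfigMap, securityv1.FSTypeSecret},
	}
	fmt.Println(util.SCCAllowsFSType(scc, securityv1.FSTypeSecret))   // true: listed explicitly
	fmt.Println(util.SCCAllowsFSType(scc, securityv1.FSTypeHostPath)) // false: not listed

	wildcard := &securityv1.SecurityContextConstraints{
		Volumes: []securityv1.FSType{securityv1.FSTypeAll},
	}
	fmt.Println(util.SCCAllowsFSType(wildcard, securityv1.FSTypeHostPath)) // true: "*" allows everything
}
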
+func SCCAllowsFSType(scc *securityv1.SecurityContextConstraints, fsType securityv1.FSType) bool { + if scc == nil { + return false + } + + for _, v := range scc.Volumes { + if v == fsType || v == securityv1.FSTypeAll { + return true + } + } + return false +} + +// EqualStringSlices compares string slices for equality. Slices are equal when +// their sizes and elements on similar positions are equal. +func EqualStringSlices(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +// IsOnlyServiceAccountTokenSources returns true if the sources of the projected volume +// source match to what would be injected by the ServiceAccount volume projection controller +// +// This function is derived from pkg/security/podsecuritypolicy/util/util.go with the +// addition of OpenShift-specific "openshift-service-ca.crt" ConfigMap source. +// +// This is what a sample injected volume looks like: +// - projected: +// defaultMode: 420 +// sources: +// - serviceAccountToken: +// expirationSeconds: 3607 +// path: token +// - configMap: +// name: kube-root-ca.crt +// items: +// - key: ca.crt +// path: ca.crt +// - downwardAPI: +// items: +// - path: namespace +// fieldRef: +// apiVersion: v1 +// fieldPath: metadata.namespace +// - configMap: +// name: openshift-service-ca.crt +// items: +// - key: service-ca.crt +// path: service-ca.crt +func IsOnlyServiceAccountTokenSources(v *api.ProjectedVolumeSource) bool { + for _, s := range v.Sources { + // reject any projected source that does not match any of our expected source types + if s.ServiceAccountToken == nil && s.ConfigMap == nil && s.DownwardAPI == nil { + return false + } + if t := s.ServiceAccountToken; t != nil && (t.Path != "token" || t.Audience != "") { + return false + } + + if s.ConfigMap != nil { + switch cmRef := s.ConfigMap.LocalObjectReference.Name; cmRef { + case "kube-root-ca.crt": + if len(s.ConfigMap.Items) != 1 || s.ConfigMap.Items[0].Key != "ca.crt" || s.ConfigMap.Items[0].Path != "ca.crt" { + return false + } + case "openshift-service-ca.crt": + if len(s.ConfigMap.Items) != 1 || s.ConfigMap.Items[0].Key != "service-ca.crt" || s.ConfigMap.Items[0].Path != "service-ca.crt" { + return false + } + default: + return false + } + } + + if s.DownwardAPI != nil { + for _, d := range s.DownwardAPI.Items { + if d.Path != "namespace" || d.FieldRef == nil || d.FieldRef.APIVersion != "v1" || d.FieldRef.FieldPath != "metadata.namespace" { + return false + } + } + } + } + return true +} diff --git a/vendor/github.com/openshift/client-go/LICENSE b/vendor/github.com/openshift/client-go/LICENSE new file mode 100644 index 0000000000000..c4ea8b6f9d88b --- /dev/null +++ b/vendor/github.com/openshift/client-go/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcount.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcount.go new file mode 100644 index 0000000000000..b2bde545f4e0b --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcount.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+
+package v1
+
+import (
+	apiserverv1 "github.com/openshift/api/apiserver/v1"
+	internal "github.com/openshift/client-go/apiserver/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// APIRequestCountApplyConfiguration represents a declarative configuration of the APIRequestCount type for use
+// with apply.
+type APIRequestCountApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *APIRequestCountSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *APIRequestCountStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// APIRequestCount constructs a declarative configuration of the APIRequestCount type for use with
+// apply.
+func APIRequestCount(name string) *APIRequestCountApplyConfiguration {
+	b := &APIRequestCountApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("APIRequestCount")
+	b.WithAPIVersion("apiserver.openshift.io/v1")
+	return b
+}
+
+// ExtractAPIRequestCount extracts the applied configuration owned by fieldManager from
+// aPIRequestCount. If no managedFields are found in aPIRequestCount for fieldManager, an
+// APIRequestCountApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// aPIRequestCount must be an unmodified APIRequestCount API object that was retrieved from the Kubernetes API.
+// ExtractAPIRequestCount provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractAPIRequestCount(aPIRequestCount *apiserverv1.APIRequestCount, fieldManager string) (*APIRequestCountApplyConfiguration, error) {
+	return extractAPIRequestCount(aPIRequestCount, fieldManager, "")
+}
+
+// ExtractAPIRequestCountStatus is the same as ExtractAPIRequestCount except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractAPIRequestCountStatus(aPIRequestCount *apiserverv1.APIRequestCount, fieldManager string) (*APIRequestCountApplyConfiguration, error) {
+	return extractAPIRequestCount(aPIRequestCount, fieldManager, "status")
+}
+
+func extractAPIRequestCount(aPIRequestCount *apiserverv1.APIRequestCount, fieldManager string, subresource string) (*APIRequestCountApplyConfiguration, error) {
+	b := &APIRequestCountApplyConfiguration{}
+	err := managedfields.ExtractInto(aPIRequestCount, internal.Parser().Type("com.github.openshift.api.apiserver.v1.APIRequestCount"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(aPIRequestCount.Name)
+
+	b.WithKind("APIRequestCount")
+	b.WithAPIVersion("apiserver.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithKind(value string) *APIRequestCountApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithAPIVersion(value string) *APIRequestCountApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithName(value string) *APIRequestCountApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithGenerateName(value string) *APIRequestCountApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithNamespace(value string) *APIRequestCountApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithUID(value types.UID) *APIRequestCountApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *APIRequestCountApplyConfiguration) WithResourceVersion(value string) *APIRequestCountApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *APIRequestCountApplyConfiguration) WithGeneration(value int64) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *APIRequestCountApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *APIRequestCountApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *APIRequestCountApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *APIRequestCountApplyConfiguration) WithLabels(entries map[string]string) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *APIRequestCountApplyConfiguration) WithAnnotations(entries map[string]string) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *APIRequestCountApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *APIRequestCountApplyConfiguration) WithFinalizers(values ...string) *APIRequestCountApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *APIRequestCountApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *APIRequestCountApplyConfiguration) WithSpec(value *APIRequestCountSpecApplyConfiguration) *APIRequestCountApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *APIRequestCountApplyConfiguration) WithStatus(value *APIRequestCountStatusApplyConfiguration) *APIRequestCountApplyConfiguration {
+ b.Status = value
+ return b
+}
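Taken together, the Extract* helpers earlier in this file and the With* setters above support the extract/modify-in-place/apply round trip the Extract comment describes. The following is a minimal sketch of that workflow, not part of the generated code; the typed-client parameter, the resource name "deployments.v1.apps", and the field-manager string "example-manager" are illustrative assumptions.

import (
    "context"

    applyv1 "github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1"
    typedv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func bumpReportedUsers(ctx context.Context, c typedv1.ApiserverV1Interface) error {
    // Read the live object, unmodified, from the API server.
    live, err := c.APIRequestCounts().Get(ctx, "deployments.v1.apps", metav1.GetOptions{})
    if err != nil {
        return err
    }
    // Extract only the fields currently owned by "example-manager".
    ac, err := applyv1.ExtractAPIRequestCount(live, "example-manager")
    if err != nil {
        return err
    }
    // Modify in place, then re-apply under the same field manager.
    ac.WithSpec(applyv1.APIRequestCountSpec().WithNumberOfUsersToReport(10))
    _, err = c.APIRequestCounts().Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager"})
    return err
}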
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *APIRequestCountApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcountspec.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcountspec.go
new file mode 100644
index 0000000000000..4a5400c64e244
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcountspec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// APIRequestCountSpecApplyConfiguration represents a declarative configuration of the APIRequestCountSpec type for use
+// with apply.
+type APIRequestCountSpecApplyConfiguration struct {
+ NumberOfUsersToReport *int64 `json:"numberOfUsersToReport,omitempty"`
+}
+
+// APIRequestCountSpecApplyConfiguration constructs a declarative configuration of the APIRequestCountSpec type for use with
+// apply.
+func APIRequestCountSpec() *APIRequestCountSpecApplyConfiguration {
+ return &APIRequestCountSpecApplyConfiguration{}
+}
+
+// WithNumberOfUsersToReport sets the NumberOfUsersToReport field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NumberOfUsersToReport field is set to the value of the last call.
+func (b *APIRequestCountSpecApplyConfiguration) WithNumberOfUsersToReport(value int64) *APIRequestCountSpecApplyConfiguration {
+ b.NumberOfUsersToReport = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcountstatus.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcountstatus.go
new file mode 100644
index 0000000000000..d6d33904b36a8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/apirequestcountstatus.go
@@ -0,0 +1,73 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// APIRequestCountStatusApplyConfiguration represents a declarative configuration of the APIRequestCountStatus type for use
+// with apply.
+type APIRequestCountStatusApplyConfiguration struct {
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ RemovedInRelease *string `json:"removedInRelease,omitempty"`
+ RequestCount *int64 `json:"requestCount,omitempty"`
+ CurrentHour *PerResourceAPIRequestLogApplyConfiguration `json:"currentHour,omitempty"`
+ Last24h []PerResourceAPIRequestLogApplyConfiguration `json:"last24h,omitempty"`
+}
+
+// APIRequestCountStatusApplyConfiguration constructs a declarative configuration of the APIRequestCountStatus type for use with
+// apply.
+func APIRequestCountStatus() *APIRequestCountStatusApplyConfiguration {
+ return &APIRequestCountStatusApplyConfiguration{}
+}
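Because every With* method returns its receiver, a status configuration like the one above is normally assembled as a single chain: scalar setters are last-call-wins, while the slice variants append. A small illustrative sketch, written as if inside this same applyconfigurations v1 package; the counts and the release string are made-up values:

// Assemble a status declaratively; nothing is sent to a server here.
status := APIRequestCountStatus().
    WithRequestCount(1042).
    WithRemovedInRelease("4.99").
    WithCurrentHour(PerResourceAPIRequestLog().WithRequestCount(17))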
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *APIRequestCountStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *APIRequestCountStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithRemovedInRelease sets the RemovedInRelease field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RemovedInRelease field is set to the value of the last call.
+func (b *APIRequestCountStatusApplyConfiguration) WithRemovedInRelease(value string) *APIRequestCountStatusApplyConfiguration {
+ b.RemovedInRelease = &value
+ return b
+}
+
+// WithRequestCount sets the RequestCount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequestCount field is set to the value of the last call.
+func (b *APIRequestCountStatusApplyConfiguration) WithRequestCount(value int64) *APIRequestCountStatusApplyConfiguration {
+ b.RequestCount = &value
+ return b
+}
+
+// WithCurrentHour sets the CurrentHour field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CurrentHour field is set to the value of the last call.
+func (b *APIRequestCountStatusApplyConfiguration) WithCurrentHour(value *PerResourceAPIRequestLogApplyConfiguration) *APIRequestCountStatusApplyConfiguration {
+ b.CurrentHour = value
+ return b
+}
+
+// WithLast24h adds the given value to the Last24h field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Last24h field.
+func (b *APIRequestCountStatusApplyConfiguration) WithLast24h(values ...*PerResourceAPIRequestLogApplyConfiguration) *APIRequestCountStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithLast24h")
+ }
+ b.Last24h = append(b.Last24h, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/pernodeapirequestlog.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/pernodeapirequestlog.go
new file mode 100644
index 0000000000000..d55d2ab7222fa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/pernodeapirequestlog.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PerNodeAPIRequestLogApplyConfiguration represents a declarative configuration of the PerNodeAPIRequestLog type for use
+// with apply.
+type PerNodeAPIRequestLogApplyConfiguration struct {
+ NodeName *string `json:"nodeName,omitempty"`
+ RequestCount *int64 `json:"requestCount,omitempty"`
+ ByUser []PerUserAPIRequestCountApplyConfiguration `json:"byUser,omitempty"`
+}
+
+// PerNodeAPIRequestLogApplyConfiguration constructs a declarative configuration of the PerNodeAPIRequestLog type for use with
+// apply.
+func PerNodeAPIRequestLog() *PerNodeAPIRequestLogApplyConfiguration {
+ return &PerNodeAPIRequestLogApplyConfiguration{}
+}
+
+// WithNodeName sets the NodeName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeName field is set to the value of the last call.
+func (b *PerNodeAPIRequestLogApplyConfiguration) WithNodeName(value string) *PerNodeAPIRequestLogApplyConfiguration {
+ b.NodeName = &value
+ return b
+}
+
+// WithRequestCount sets the RequestCount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequestCount field is set to the value of the last call.
+func (b *PerNodeAPIRequestLogApplyConfiguration) WithRequestCount(value int64) *PerNodeAPIRequestLogApplyConfiguration {
+ b.RequestCount = &value
+ return b
+}
+
+// WithByUser adds the given value to the ByUser field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ByUser field.
+func (b *PerNodeAPIRequestLogApplyConfiguration) WithByUser(values ...*PerUserAPIRequestCountApplyConfiguration) *PerNodeAPIRequestLogApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithByUser")
+ }
+ b.ByUser = append(b.ByUser, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/perresourceapirequestlog.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/perresourceapirequestlog.go
new file mode 100644
index 0000000000000..27dfc5b3d1be6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/perresourceapirequestlog.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PerResourceAPIRequestLogApplyConfiguration represents a declarative configuration of the PerResourceAPIRequestLog type for use
+// with apply.
+type PerResourceAPIRequestLogApplyConfiguration struct {
+ ByNode []PerNodeAPIRequestLogApplyConfiguration `json:"byNode,omitempty"`
+ RequestCount *int64 `json:"requestCount,omitempty"`
+}
+
+// PerResourceAPIRequestLogApplyConfiguration constructs a declarative configuration of the PerResourceAPIRequestLog type for use with
+// apply.
+func PerResourceAPIRequestLog() *PerResourceAPIRequestLogApplyConfiguration {
+ return &PerResourceAPIRequestLogApplyConfiguration{}
+}
+
+// WithByNode adds the given value to the ByNode field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ByNode field.
+func (b *PerResourceAPIRequestLogApplyConfiguration) WithByNode(values ...*PerNodeAPIRequestLogApplyConfiguration) *PerResourceAPIRequestLogApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithByNode")
+ }
+ b.ByNode = append(b.ByNode, *values[i])
+ }
+ return b
+}
+
+// WithRequestCount sets the RequestCount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequestCount field is set to the value of the last call.
+func (b *PerResourceAPIRequestLogApplyConfiguration) WithRequestCount(value int64) *PerResourceAPIRequestLogApplyConfiguration {
+ b.RequestCount = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/peruserapirequestcount.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/peruserapirequestcount.go
new file mode 100644
index 0000000000000..5b35a28e8e4fe
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/peruserapirequestcount.go
@@ -0,0 +1,55 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PerUserAPIRequestCountApplyConfiguration represents a declarative configuration of the PerUserAPIRequestCount type for use
+// with apply.
+type PerUserAPIRequestCountApplyConfiguration struct {
+ UserName *string `json:"username,omitempty"`
+ UserAgent *string `json:"userAgent,omitempty"`
+ RequestCount *int64 `json:"requestCount,omitempty"`
+ ByVerb []PerVerbAPIRequestCountApplyConfiguration `json:"byVerb,omitempty"`
+}
+
+// PerUserAPIRequestCountApplyConfiguration constructs a declarative configuration of the PerUserAPIRequestCount type for use with
+// apply.
+func PerUserAPIRequestCount() *PerUserAPIRequestCountApplyConfiguration {
+ return &PerUserAPIRequestCountApplyConfiguration{}
+}
+
+// WithUserName sets the UserName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserName field is set to the value of the last call.
+func (b *PerUserAPIRequestCountApplyConfiguration) WithUserName(value string) *PerUserAPIRequestCountApplyConfiguration {
+ b.UserName = &value
+ return b
+}
+
+// WithUserAgent sets the UserAgent field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserAgent field is set to the value of the last call.
+func (b *PerUserAPIRequestCountApplyConfiguration) WithUserAgent(value string) *PerUserAPIRequestCountApplyConfiguration {
+ b.UserAgent = &value
+ return b
+}
+
+// WithRequestCount sets the RequestCount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequestCount field is set to the value of the last call.
+func (b *PerUserAPIRequestCountApplyConfiguration) WithRequestCount(value int64) *PerUserAPIRequestCountApplyConfiguration {
+ b.RequestCount = &value
+ return b
+}
+
+// WithByVerb adds the given value to the ByVerb field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ByVerb field. +func (b *PerUserAPIRequestCountApplyConfiguration) WithByVerb(values ...*PerVerbAPIRequestCountApplyConfiguration) *PerUserAPIRequestCountApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithByVerb") + } + b.ByVerb = append(b.ByVerb, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/perverbapirequestcount.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/perverbapirequestcount.go new file mode 100644 index 0000000000000..569f8b4930d9b --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1/perverbapirequestcount.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PerVerbAPIRequestCountApplyConfiguration represents a declarative configuration of the PerVerbAPIRequestCount type for use +// with apply. +type PerVerbAPIRequestCountApplyConfiguration struct { + Verb *string `json:"verb,omitempty"` + RequestCount *int64 `json:"requestCount,omitempty"` +} + +// PerVerbAPIRequestCountApplyConfiguration constructs a declarative configuration of the PerVerbAPIRequestCount type for use with +// apply. +func PerVerbAPIRequestCount() *PerVerbAPIRequestCountApplyConfiguration { + return &PerVerbAPIRequestCountApplyConfiguration{} +} + +// WithVerb sets the Verb field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Verb field is set to the value of the last call. +func (b *PerVerbAPIRequestCountApplyConfiguration) WithVerb(value string) *PerVerbAPIRequestCountApplyConfiguration { + b.Verb = &value + return b +} + +// WithRequestCount sets the RequestCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequestCount field is set to the value of the last call. +func (b *PerVerbAPIRequestCountApplyConfiguration) WithRequestCount(value int64) *PerVerbAPIRequestCountApplyConfiguration { + b.RequestCount = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..118db57bbbf19 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/internal/internal.go @@ -0,0 +1,316 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.apiserver.v1.APIRequestCount + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.apiserver.v1.APIRequestCountSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.apiserver.v1.APIRequestCountStatus + default: {} +- name: com.github.openshift.api.apiserver.v1.APIRequestCountSpec + map: + fields: + - name: numberOfUsersToReport + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.apiserver.v1.APIRequestCountStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: currentHour + type: + namedType: com.github.openshift.api.apiserver.v1.PerResourceAPIRequestLog + default: {} + - name: last24h + type: + list: + elementType: + namedType: com.github.openshift.api.apiserver.v1.PerResourceAPIRequestLog + elementRelationship: atomic + - name: removedInRelease + type: + scalar: string + - name: requestCount + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.apiserver.v1.PerNodeAPIRequestLog + map: + fields: + - name: byUser + type: + list: + elementType: + namedType: com.github.openshift.api.apiserver.v1.PerUserAPIRequestCount + elementRelationship: atomic + - name: nodeName + type: + scalar: string + default: "" + - name: requestCount + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.apiserver.v1.PerResourceAPIRequestLog + map: + fields: + - name: byNode + type: + list: + elementType: + namedType: com.github.openshift.api.apiserver.v1.PerNodeAPIRequestLog + elementRelationship: atomic + - name: requestCount + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.apiserver.v1.PerUserAPIRequestCount + map: + fields: + - name: byVerb + type: + list: + elementType: + namedType: com.github.openshift.api.apiserver.v1.PerVerbAPIRequestCount + elementRelationship: atomic + - name: requestCount + type: + scalar: numeric + default: 0 + - name: userAgent + type: + scalar: string + default: "" + - name: username + type: + scalar: string + default: "" +- name: com.github.openshift.api.apiserver.v1.PerVerbAPIRequestCount + map: + fields: + - name: requestCount + type: + scalar: numeric + default: 0 + - name: verb + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + default: "" + - name: observedGeneration + type: + scalar: numeric + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: 
+ namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/utils.go new file mode 100644 index 0000000000000..d3c4b2616bf7c --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/applyconfigurations/utils.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package applyconfigurations + +import ( + v1 "github.com/openshift/api/apiserver/v1" + apiserverv1 "github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1" + internal "github.com/openshift/client-go/apiserver/applyconfigurations/internal" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. +func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=apiserver.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("APIRequestCount"): + return &apiserverv1.APIRequestCountApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIRequestCountSpec"): + return &apiserverv1.APIRequestCountSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIRequestCountStatus"): + return &apiserverv1.APIRequestCountStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PerNodeAPIRequestLog"): + return &apiserverv1.PerNodeAPIRequestLogApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PerResourceAPIRequestLog"): + return &apiserverv1.PerResourceAPIRequestLogApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PerUserAPIRequestCount"): + return &apiserverv1.PerUserAPIRequestCountApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PerVerbAPIRequestCount"): + return &apiserverv1.PerVerbAPIRequestCountApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..1e5c4c601e01f --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + apiserverv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ApiserverV1() apiserverv1.ApiserverV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + apiserverV1 *apiserverv1.ApiserverV1Client +} + +// ApiserverV1 retrieves the ApiserverV1Client +func (c *Clientset) ApiserverV1() apiserverv1.ApiserverV1Interface { + return c.apiserverV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
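The ForKind helper above gives dynamic callers a way to obtain the right apply-configuration type from a GroupVersionKind at runtime. A brief sketch, not part of the generated code; the function name is hypothetical and the type assertion mirrors the switch in ForKind:

import (
    v1 "github.com/openshift/api/apiserver/v1"
    "github.com/openshift/client-go/apiserver/applyconfigurations"
    apiserverv1 "github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1"
)

func newApplyConfigForAPIRequestCount() *apiserverv1.APIRequestCountApplyConfiguration {
    obj := applyconfigurations.ForKind(v1.SchemeGroupVersion.WithKind("APIRequestCount"))
    if obj == nil {
        return nil // kind unknown to this package
    }
    // ForKind returns interface{}, so callers assert the concrete type.
    return obj.(*apiserverv1.APIRequestCountApplyConfiguration)
}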
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.apiserverV1, err = apiserverv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.apiserverV1 = apiserverv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000000..7fb7c3c2973ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + applyconfigurations "github.com/openshift/client-go/apiserver/applyconfigurations" + clientset "github.com/openshift/client-go/apiserver/clientset/versioned" + apiserverv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + fakeapiserverv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ApiserverV1 retrieves the ApiserverV1Client +func (c *Clientset) ApiserverV1() apiserverv1.ApiserverV1Interface { + return &fakeapiserverv1.FakeApiserverV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000000..3630ed1cd17db --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. 
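A hypothetical unit test using the fake clientset above; the test name and the seeded object are assumptions for illustration. NewClientset (rather than the deprecated NewSimpleClientset) also wires up field management, so the same test style works for server-side apply:

import (
    "context"
    "testing"

    apiserverv1 "github.com/openshift/api/apiserver/v1"
    "github.com/openshift/client-go/apiserver/clientset/versioned/fake"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestReadAPIRequestCount(t *testing.T) {
    // Seed the object tracker with one pre-existing APIRequestCount.
    seed := &apiserverv1.APIRequestCount{ObjectMeta: metav1.ObjectMeta{Name: "deployments.v1.apps"}}
    cs := fake.NewClientset(seed)

    got, err := cs.ApiserverV1().APIRequestCounts().Get(context.Background(), "deployments.v1.apps", metav1.GetOptions{})
    if err != nil || got.Name != "deployments.v1.apps" {
        t.Fatalf("unexpected result: %v, %v", got, err)
    }
}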
+package fake diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/register.go new file mode 100644 index 0000000000000..dc362ba6861d9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + apiserverv1 "github.com/openshift/api/apiserver/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + apiserverv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..2c406fbd7ae5b --- /dev/null +++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + apiserverv1 "github.com/openshift/api/apiserver/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + apiserverv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/apirequestcount.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/apirequestcount.go
new file mode 100644
index 0000000000000..3b200ca015853
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/apirequestcount.go
@@ -0,0 +1,58 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+
+ apiserverv1 "github.com/openshift/api/apiserver/v1"
+ applyconfigurationsapiserverv1 "github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1"
+ scheme "github.com/openshift/client-go/apiserver/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ gentype "k8s.io/client-go/gentype"
+)
+
+// APIRequestCountsGetter has a method to return an APIRequestCountInterface.
+// A group's client should implement this interface.
+type APIRequestCountsGetter interface {
+ APIRequestCounts() APIRequestCountInterface
+}
+
+// APIRequestCountInterface has methods to work with APIRequestCount resources.
+type APIRequestCountInterface interface {
+ Create(ctx context.Context, aPIRequestCount *apiserverv1.APIRequestCount, opts metav1.CreateOptions) (*apiserverv1.APIRequestCount, error)
+ Update(ctx context.Context, aPIRequestCount *apiserverv1.APIRequestCount, opts metav1.UpdateOptions) (*apiserverv1.APIRequestCount, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, aPIRequestCount *apiserverv1.APIRequestCount, opts metav1.UpdateOptions) (*apiserverv1.APIRequestCount, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiserverv1.APIRequestCount, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*apiserverv1.APIRequestCountList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiserverv1.APIRequestCount, err error)
+ Apply(ctx context.Context, aPIRequestCount *applyconfigurationsapiserverv1.APIRequestCountApplyConfiguration, opts metav1.ApplyOptions) (result *apiserverv1.APIRequestCount, err error)
+ // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+ ApplyStatus(ctx context.Context, aPIRequestCount *applyconfigurationsapiserverv1.APIRequestCountApplyConfiguration, opts metav1.ApplyOptions) (result *apiserverv1.APIRequestCount, err error)
+ APIRequestCountExpansion
+}
+
+// aPIRequestCounts implements APIRequestCountInterface
+type aPIRequestCounts struct {
+ *gentype.ClientWithListAndApply[*apiserverv1.APIRequestCount, *apiserverv1.APIRequestCountList, *applyconfigurationsapiserverv1.APIRequestCountApplyConfiguration]
+}
+
+// newAPIRequestCounts returns an APIRequestCounts
+func newAPIRequestCounts(c *ApiserverV1Client) *aPIRequestCounts {
+ return &aPIRequestCounts{
+ gentype.NewClientWithListAndApply[*apiserverv1.APIRequestCount, *apiserverv1.APIRequestCountList, *applyconfigurationsapiserverv1.APIRequestCountApplyConfiguration](
+ "apirequestcounts",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *apiserverv1.APIRequestCount { return &apiserverv1.APIRequestCount{} },
+ func() *apiserverv1.APIRequestCountList { return &apiserverv1.APIRequestCountList{} },
+ ),
+ }
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/apiserver_client.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/apiserver_client.go
new file mode 100644
index 0000000000000..aac5e0a687ccb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/apiserver_client.go
@@ -0,0 +1,91 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ http "net/http"
+
+ apiserverv1 "github.com/openshift/api/apiserver/v1"
+ scheme "github.com/openshift/client-go/apiserver/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type ApiserverV1Interface interface {
+ RESTClient() rest.Interface
+ APIRequestCountsGetter
+}
+
+// ApiserverV1Client is used to interact with features provided by the apiserver.openshift.io group.
+type ApiserverV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ApiserverV1Client) APIRequestCounts() APIRequestCountInterface {
+ return newAPIRequestCounts(c)
+}
+
+// NewForConfig creates a new ApiserverV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ApiserverV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ApiserverV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiserverV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ApiserverV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ApiserverV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ApiserverV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
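Wiring the constructors above together, a caller might build the typed client from a kubeconfig and watch APIRequestCounts. This is a sketch under assumptions: the kubeconfig path is a placeholder, and any mechanism that yields a *rest.Config works equally well.

import (
    "context"

    typedv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func watchCounts(ctx context.Context) error {
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        return err
    }
    c, err := typedv1.NewForConfig(cfg)
    if err != nil {
        return err
    }
    // APIRequestCount is cluster-scoped, so no namespace is involved.
    w, err := c.APIRequestCounts().Watch(ctx, metav1.ListOptions{})
    if err != nil {
        return err
    }
    defer w.Stop()
    for ev := range w.ResultChan() {
        _ = ev // handle Added/Modified/Deleted events here
    }
    return nil
}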
+
+// New creates a new ApiserverV1Client for the given RESTClient.
+func New(c rest.Interface) *ApiserverV1Client {
+ return &ApiserverV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := apiserverv1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *ApiserverV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/doc.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/doc.go
new file mode 100644
index 0000000000000..225e6b2be34f2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/doc.go
@@ -0,0 +1,4 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/doc.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/doc.go
new file mode 100644
index 0000000000000..2b5ba4c8e4422
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/doc.go
@@ -0,0 +1,4 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/fake_apirequestcount.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/fake_apirequestcount.go
new file mode 100644
index 0000000000000..4c516c55debec
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/fake_apirequestcount.go
@@ -0,0 +1,35 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/openshift/api/apiserver/v1"
+ apiserverv1 "github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1"
+ typedapiserverv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1"
+ gentype "k8s.io/client-go/gentype"
+)
+
+// fakeAPIRequestCounts implements APIRequestCountInterface
+type fakeAPIRequestCounts struct {
+ *gentype.FakeClientWithListAndApply[*v1.APIRequestCount, *v1.APIRequestCountList, *apiserverv1.APIRequestCountApplyConfiguration]
+ Fake *FakeApiserverV1
+}
+
+func newFakeAPIRequestCounts(fake *FakeApiserverV1) typedapiserverv1.APIRequestCountInterface {
+ return &fakeAPIRequestCounts{
+ gentype.NewFakeClientWithListAndApply[*v1.APIRequestCount, *v1.APIRequestCountList, *apiserverv1.APIRequestCountApplyConfiguration](
+ fake.Fake,
+ "",
+ v1.SchemeGroupVersion.WithResource("apirequestcounts"),
+ v1.SchemeGroupVersion.WithKind("APIRequestCount"),
+ func() *v1.APIRequestCount { return &v1.APIRequestCount{} },
+ func() *v1.APIRequestCountList { return &v1.APIRequestCountList{} },
+ func(dst, src *v1.APIRequestCountList) { dst.ListMeta = src.ListMeta },
+ func(list *v1.APIRequestCountList) []*v1.APIRequestCount { return gentype.ToPointerSlice(list.Items) },
+ func(list *v1.APIRequestCountList, items []*v1.APIRequestCount) {
+ list.Items = gentype.FromPointerSlice(items)
+ },
+ ),
+ fake,
+ }
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/fake_apiserver_client.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/fake_apiserver_client.go
new file mode 100644
index 0000000000000..756df043aed92
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake/fake_apiserver_client.go
@@ -0,0 +1,24 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeApiserverV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeApiserverV1) APIRequestCounts() v1.APIRequestCountInterface {
+ return newFakeAPIRequestCounts(c)
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *FakeApiserverV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/generated_expansion.go
new file mode 100644
index 0000000000000..e2bba0a874134
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/generated_expansion.go
@@ -0,0 +1,5 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type APIRequestCountExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/customdeploymentstrategyparams.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/customdeploymentstrategyparams.go
new file mode 100644
index 0000000000000..10d56cc323341
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/customdeploymentstrategyparams.go
@@ -0,0 +1,49 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+// CustomDeploymentStrategyParamsApplyConfiguration represents a declarative configuration of the CustomDeploymentStrategyParams type for use
+// with apply.
+type CustomDeploymentStrategyParamsApplyConfiguration struct {
+ Image *string `json:"image,omitempty"`
+ Environment []corev1.EnvVar `json:"environment,omitempty"`
+ Command []string `json:"command,omitempty"`
+}
+
+// CustomDeploymentStrategyParamsApplyConfiguration constructs a declarative configuration of the CustomDeploymentStrategyParams type for use with
+// apply.
+func CustomDeploymentStrategyParams() *CustomDeploymentStrategyParamsApplyConfiguration {
+ return &CustomDeploymentStrategyParamsApplyConfiguration{}
+}
+
+// WithImage sets the Image field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Image field is set to the value of the last call.
+func (b *CustomDeploymentStrategyParamsApplyConfiguration) WithImage(value string) *CustomDeploymentStrategyParamsApplyConfiguration {
+ b.Image = &value
+ return b
+}
+
+// WithEnvironment adds the given value to the Environment field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Environment field.
+func (b *CustomDeploymentStrategyParamsApplyConfiguration) WithEnvironment(values ...corev1.EnvVar) *CustomDeploymentStrategyParamsApplyConfiguration {
+ for i := range values {
+ b.Environment = append(b.Environment, values[i])
+ }
+ return b
+}
+
+// WithCommand adds the given value to the Command field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Command field.
+func (b *CustomDeploymentStrategyParamsApplyConfiguration) WithCommand(values ...string) *CustomDeploymentStrategyParamsApplyConfiguration {
+ for i := range values {
+ b.Command = append(b.Command, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcause.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcause.go
new file mode 100644
index 0000000000000..d772740d0e533
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcause.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ appsv1 "github.com/openshift/api/apps/v1"
+)
+
+// DeploymentCauseApplyConfiguration represents a declarative configuration of the DeploymentCause type for use
+// with apply.
+type DeploymentCauseApplyConfiguration struct {
+ Type *appsv1.DeploymentTriggerType `json:"type,omitempty"`
+ ImageTrigger *DeploymentCauseImageTriggerApplyConfiguration `json:"imageTrigger,omitempty"`
+}
+
+// DeploymentCauseApplyConfiguration constructs a declarative configuration of the DeploymentCause type for use with
+// apply.
+func DeploymentCause() *DeploymentCauseApplyConfiguration {
+ return &DeploymentCauseApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call. +func (b *DeploymentCauseApplyConfiguration) WithType(value appsv1.DeploymentTriggerType) *DeploymentCauseApplyConfiguration { + b.Type = &value + return b +} + +// WithImageTrigger sets the ImageTrigger field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageTrigger field is set to the value of the last call. +func (b *DeploymentCauseApplyConfiguration) WithImageTrigger(value *DeploymentCauseImageTriggerApplyConfiguration) *DeploymentCauseApplyConfiguration { + b.ImageTrigger = value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcauseimagetrigger.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcauseimagetrigger.go new file mode 100644 index 0000000000000..05e3a794f0bc1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcauseimagetrigger.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// DeploymentCauseImageTriggerApplyConfiguration represents a declarative configuration of the DeploymentCauseImageTrigger type for use +// with apply. +type DeploymentCauseImageTriggerApplyConfiguration struct { + From *corev1.ObjectReference `json:"from,omitempty"` +} + +// DeploymentCauseImageTriggerApplyConfiguration constructs a declarative configuration of the DeploymentCauseImageTrigger type for use with +// apply. +func DeploymentCauseImageTrigger() *DeploymentCauseImageTriggerApplyConfiguration { + return &DeploymentCauseImageTriggerApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *DeploymentCauseImageTriggerApplyConfiguration) WithFrom(value corev1.ObjectReference) *DeploymentCauseImageTriggerApplyConfiguration { + b.From = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcondition.go new file mode 100644 index 0000000000000..b5ffab10c8267 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentcondition.go @@ -0,0 +1,74 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + appsv1 "github.com/openshift/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use +// with apply. 
+type DeploymentConditionApplyConfiguration struct { + Type *appsv1.DeploymentConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with +// apply. +func DeploymentCondition() *DeploymentConditionApplyConfiguration { + return &DeploymentConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *DeploymentConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *DeploymentConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastUpdateTime field is set to the value of the last call. +func (b *DeploymentConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *DeploymentConditionApplyConfiguration { + b.LastUpdateTime = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *DeploymentConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *DeploymentConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *DeploymentConditionApplyConfiguration) WithReason(value string) *DeploymentConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. 
+func (b *DeploymentConditionApplyConfiguration) WithMessage(value string) *DeploymentConditionApplyConfiguration {
+	b.Message = &value
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfig.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfig.go new file mode 100644 index 0000000000000..55fd0caaf42de --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfig.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	appsv1 "github.com/openshift/api/apps/v1"
+	internal "github.com/openshift/client-go/apps/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// DeploymentConfigApplyConfiguration represents a declarative configuration of the DeploymentConfig type for use
+// with apply.
+type DeploymentConfigApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *DeploymentConfigSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *DeploymentConfigStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// DeploymentConfig constructs a declarative configuration of the DeploymentConfig type for use with
+// apply.
+func DeploymentConfig(name, namespace string) *DeploymentConfigApplyConfiguration {
+	b := &DeploymentConfigApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("DeploymentConfig")
+	b.WithAPIVersion("apps.openshift.io/v1")
+	return b
+}
+
+// ExtractDeploymentConfig extracts the applied configuration owned by fieldManager from
+// deploymentConfig. If no managedFields are found in deploymentConfig for fieldManager, a
+// DeploymentConfigApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// deploymentConfig must be an unmodified DeploymentConfig API object that was retrieved from the Kubernetes API.
+// ExtractDeploymentConfig provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractDeploymentConfig(deploymentConfig *appsv1.DeploymentConfig, fieldManager string) (*DeploymentConfigApplyConfiguration, error) {
+	return extractDeploymentConfig(deploymentConfig, fieldManager, "")
+}
+
+// ExtractDeploymentConfigStatus is the same as ExtractDeploymentConfig except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDeploymentConfigStatus(deploymentConfig *appsv1.DeploymentConfig, fieldManager string) (*DeploymentConfigApplyConfiguration, error) { + return extractDeploymentConfig(deploymentConfig, fieldManager, "status") +} + +func extractDeploymentConfig(deploymentConfig *appsv1.DeploymentConfig, fieldManager string, subresource string) (*DeploymentConfigApplyConfiguration, error) { + b := &DeploymentConfigApplyConfiguration{} + err := managedfields.ExtractInto(deploymentConfig, internal.Parser().Type("com.github.openshift.api.apps.v1.DeploymentConfig"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deploymentConfig.Name) + b.WithNamespace(deploymentConfig.Namespace) + + b.WithKind("DeploymentConfig") + b.WithAPIVersion("apps.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeploymentConfigApplyConfiguration) WithKind(value string) *DeploymentConfigApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DeploymentConfigApplyConfiguration) WithAPIVersion(value string) *DeploymentConfigApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeploymentConfigApplyConfiguration) WithName(value string) *DeploymentConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DeploymentConfigApplyConfiguration) WithGenerateName(value string) *DeploymentConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeploymentConfigApplyConfiguration) WithNamespace(value string) *DeploymentConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *DeploymentConfigApplyConfiguration) WithUID(value types.UID) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithResourceVersion(value string) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithGeneration(value int64) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in Labels field with the same key.
+func (b *DeploymentConfigApplyConfiguration) WithLabels(entries map[string]string) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in Annotations field with the same key.
+func (b *DeploymentConfigApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *DeploymentConfigApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *DeploymentConfigApplyConfiguration) WithFinalizers(values ...string) *DeploymentConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *DeploymentConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithSpec(value *DeploymentConfigSpecApplyConfiguration) *DeploymentConfigApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *DeploymentConfigApplyConfiguration) WithStatus(value *DeploymentConfigStatusApplyConfiguration) *DeploymentConfigApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentConfigApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfigspec.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfigspec.go new file mode 100644 index 0000000000000..90d869ace885e --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfigspec.go @@ -0,0 +1,106 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + appsv1 "github.com/openshift/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +// DeploymentConfigSpecApplyConfiguration represents a declarative configuration of the DeploymentConfigSpec type for use +// with apply. +type DeploymentConfigSpecApplyConfiguration struct { + Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + Triggers *appsv1.DeploymentTriggerPolicies `json:"triggers,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + Test *bool `json:"test,omitempty"` + Paused *bool `json:"paused,omitempty"` + Selector map[string]string `json:"selector,omitempty"` + Template *corev1.PodTemplateSpec `json:"template,omitempty"` +} + +// DeploymentConfigSpecApplyConfiguration constructs a declarative configuration of the DeploymentConfigSpec type for use with +// apply. +func DeploymentConfigSpec() *DeploymentConfigSpecApplyConfiguration { + return &DeploymentConfigSpecApplyConfiguration{} +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *DeploymentConfigSpecApplyConfiguration) WithStrategy(value *DeploymentStrategyApplyConfiguration) *DeploymentConfigSpecApplyConfiguration { + b.Strategy = value + return b +} + +// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReadySeconds field is set to the value of the last call. +func (b *DeploymentConfigSpecApplyConfiguration) WithMinReadySeconds(value int32) *DeploymentConfigSpecApplyConfiguration { + b.MinReadySeconds = &value + return b +} + +// WithTriggers sets the Triggers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Triggers field is set to the value of the last call. 
+func (b *DeploymentConfigSpecApplyConfiguration) WithTriggers(value appsv1.DeploymentTriggerPolicies) *DeploymentConfigSpecApplyConfiguration {
+	b.Triggers = &value
+	return b
+}
+
+// WithReplicas sets the Replicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Replicas field is set to the value of the last call.
+func (b *DeploymentConfigSpecApplyConfiguration) WithReplicas(value int32) *DeploymentConfigSpecApplyConfiguration {
+	b.Replicas = &value
+	return b
+}
+
+// WithRevisionHistoryLimit sets the RevisionHistoryLimit field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RevisionHistoryLimit field is set to the value of the last call.
+func (b *DeploymentConfigSpecApplyConfiguration) WithRevisionHistoryLimit(value int32) *DeploymentConfigSpecApplyConfiguration {
+	b.RevisionHistoryLimit = &value
+	return b
+}
+
+// WithTest sets the Test field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Test field is set to the value of the last call.
+func (b *DeploymentConfigSpecApplyConfiguration) WithTest(value bool) *DeploymentConfigSpecApplyConfiguration {
+	b.Test = &value
+	return b
+}
+
+// WithPaused sets the Paused field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Paused field is set to the value of the last call.
+func (b *DeploymentConfigSpecApplyConfiguration) WithPaused(value bool) *DeploymentConfigSpecApplyConfiguration {
+	b.Paused = &value
+	return b
+}
+
+// WithSelector puts the entries into the Selector field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Selector field,
+// overwriting existing map entries in Selector field with the same key.
+func (b *DeploymentConfigSpecApplyConfiguration) WithSelector(entries map[string]string) *DeploymentConfigSpecApplyConfiguration {
+	if b.Selector == nil && len(entries) > 0 {
+		b.Selector = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Selector[k] = v
+	}
+	return b
+}
+
+// WithTemplate sets the Template field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Template field is set to the value of the last call.
+func (b *DeploymentConfigSpecApplyConfiguration) WithTemplate(value corev1.PodTemplateSpec) *DeploymentConfigSpecApplyConfiguration {
+	b.Template = &value
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfigstatus.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfigstatus.go new file mode 100644 index 0000000000000..1ae86c5653dd9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentconfigstatus.go @@ -0,0 +1,100 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+ +package v1 + +// DeploymentConfigStatusApplyConfiguration represents a declarative configuration of the DeploymentConfigStatus type for use +// with apply. +type DeploymentConfigStatusApplyConfiguration struct { + LatestVersion *int64 `json:"latestVersion,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"` + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"` + Details *DeploymentDetailsApplyConfiguration `json:"details,omitempty"` + Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"` + ReadyReplicas *int32 `json:"readyReplicas,omitempty"` +} + +// DeploymentConfigStatusApplyConfiguration constructs a declarative configuration of the DeploymentConfigStatus type for use with +// apply. +func DeploymentConfigStatus() *DeploymentConfigStatusApplyConfiguration { + return &DeploymentConfigStatusApplyConfiguration{} +} + +// WithLatestVersion sets the LatestVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestVersion field is set to the value of the last call. +func (b *DeploymentConfigStatusApplyConfiguration) WithLatestVersion(value int64) *DeploymentConfigStatusApplyConfiguration { + b.LatestVersion = &value + return b +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *DeploymentConfigStatusApplyConfiguration) WithObservedGeneration(value int64) *DeploymentConfigStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *DeploymentConfigStatusApplyConfiguration) WithReplicas(value int32) *DeploymentConfigStatusApplyConfiguration { + b.Replicas = &value + return b +} + +// WithUpdatedReplicas sets the UpdatedReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatedReplicas field is set to the value of the last call. +func (b *DeploymentConfigStatusApplyConfiguration) WithUpdatedReplicas(value int32) *DeploymentConfigStatusApplyConfiguration { + b.UpdatedReplicas = &value + return b +} + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. 
+func (b *DeploymentConfigStatusApplyConfiguration) WithAvailableReplicas(value int32) *DeploymentConfigStatusApplyConfiguration {
+	b.AvailableReplicas = &value
+	return b
+}
+
+// WithUnavailableReplicas sets the UnavailableReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UnavailableReplicas field is set to the value of the last call.
+func (b *DeploymentConfigStatusApplyConfiguration) WithUnavailableReplicas(value int32) *DeploymentConfigStatusApplyConfiguration {
+	b.UnavailableReplicas = &value
+	return b
+}
+
+// WithDetails sets the Details field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Details field is set to the value of the last call.
+func (b *DeploymentConfigStatusApplyConfiguration) WithDetails(value *DeploymentDetailsApplyConfiguration) *DeploymentConfigStatusApplyConfiguration {
+	b.Details = value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *DeploymentConfigStatusApplyConfiguration) WithConditions(values ...*DeploymentConditionApplyConfiguration) *DeploymentConfigStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ReadyReplicas field is set to the value of the last call.
+func (b *DeploymentConfigStatusApplyConfiguration) WithReadyReplicas(value int32) *DeploymentConfigStatusApplyConfiguration {
+	b.ReadyReplicas = &value
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentdetails.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentdetails.go new file mode 100644 index 0000000000000..e2f8e4c31002c --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentdetails.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// DeploymentDetailsApplyConfiguration represents a declarative configuration of the DeploymentDetails type for use
+// with apply.
+type DeploymentDetailsApplyConfiguration struct {
+	Message *string                             `json:"message,omitempty"`
+	Causes  []DeploymentCauseApplyConfiguration `json:"causes,omitempty"`
+}
+
+// DeploymentDetailsApplyConfiguration constructs a declarative configuration of the DeploymentDetails type for use with
+// apply.
+func DeploymentDetails() *DeploymentDetailsApplyConfiguration {
+	return &DeploymentDetailsApplyConfiguration{}
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *DeploymentDetailsApplyConfiguration) WithMessage(value string) *DeploymentDetailsApplyConfiguration {
+	b.Message = &value
+	return b
+}
+
+// WithCauses adds the given value to the Causes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Causes field.
+func (b *DeploymentDetailsApplyConfiguration) WithCauses(values ...*DeploymentCauseApplyConfiguration) *DeploymentDetailsApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithCauses")
+		}
+		b.Causes = append(b.Causes, *values[i])
+	}
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentstrategy.go new file mode 100644 index 0000000000000..021626305100c --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymentstrategy.go @@ -0,0 +1,103 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	appsv1 "github.com/openshift/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
+// with apply.
+type DeploymentStrategyApplyConfiguration struct {
+	Type                  *appsv1.DeploymentStrategyType                      `json:"type,omitempty"`
+	CustomParams          *CustomDeploymentStrategyParamsApplyConfiguration   `json:"customParams,omitempty"`
+	RecreateParams        *RecreateDeploymentStrategyParamsApplyConfiguration `json:"recreateParams,omitempty"`
+	RollingParams         *RollingDeploymentStrategyParamsApplyConfiguration  `json:"rollingParams,omitempty"`
+	Resources             *corev1.ResourceRequirements                        `json:"resources,omitempty"`
+	Labels                map[string]string                                   `json:"labels,omitempty"`
+	Annotations           map[string]string                                   `json:"annotations,omitempty"`
+	ActiveDeadlineSeconds *int64                                              `json:"activeDeadlineSeconds,omitempty"`
+}
+
+// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with
+// apply.
+func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
+	return &DeploymentStrategyApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithCustomParams sets the CustomParams field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CustomParams field is set to the value of the last call.
+func (b *DeploymentStrategyApplyConfiguration) WithCustomParams(value *CustomDeploymentStrategyParamsApplyConfiguration) *DeploymentStrategyApplyConfiguration {
+	b.CustomParams = value
+	return b
+}
+
+// WithRecreateParams sets the RecreateParams field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RecreateParams field is set to the value of the last call.
+func (b *DeploymentStrategyApplyConfiguration) WithRecreateParams(value *RecreateDeploymentStrategyParamsApplyConfiguration) *DeploymentStrategyApplyConfiguration {
+	b.RecreateParams = value
+	return b
+}
+
+// WithRollingParams sets the RollingParams field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RollingParams field is set to the value of the last call.
+func (b *DeploymentStrategyApplyConfiguration) WithRollingParams(value *RollingDeploymentStrategyParamsApplyConfiguration) *DeploymentStrategyApplyConfiguration {
+	b.RollingParams = value
+	return b
+}
+
+// WithResources sets the Resources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resources field is set to the value of the last call.
+func (b *DeploymentStrategyApplyConfiguration) WithResources(value corev1.ResourceRequirements) *DeploymentStrategyApplyConfiguration {
+	b.Resources = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in Labels field with the same key.
+func (b *DeploymentStrategyApplyConfiguration) WithLabels(entries map[string]string) *DeploymentStrategyApplyConfiguration {
+	if b.Labels == nil && len(entries) > 0 {
+		b.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in Annotations field with the same key.
+func (b *DeploymentStrategyApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentStrategyApplyConfiguration {
+	if b.Annotations == nil && len(entries) > 0 {
+		b.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.Annotations[k] = v
+	}
+	return b
+}
+
+// WithActiveDeadlineSeconds sets the ActiveDeadlineSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ActiveDeadlineSeconds field is set to the value of the last call.
+func (b *DeploymentStrategyApplyConfiguration) WithActiveDeadlineSeconds(value int64) *DeploymentStrategyApplyConfiguration {
+	b.ActiveDeadlineSeconds = &value
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymenttriggerimagechangeparams.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymenttriggerimagechangeparams.go new file mode 100644 index 0000000000000..f8db16284607d --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymenttriggerimagechangeparams.go @@ -0,0 +1,56 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// DeploymentTriggerImageChangeParamsApplyConfiguration represents a declarative configuration of the DeploymentTriggerImageChangeParams type for use
+// with apply.
+type DeploymentTriggerImageChangeParamsApplyConfiguration struct {
+	Automatic          *bool                   `json:"automatic,omitempty"`
+	ContainerNames     []string                `json:"containerNames,omitempty"`
+	From               *corev1.ObjectReference `json:"from,omitempty"`
+	LastTriggeredImage *string                 `json:"lastTriggeredImage,omitempty"`
+}
+
+// DeploymentTriggerImageChangeParamsApplyConfiguration constructs a declarative configuration of the DeploymentTriggerImageChangeParams type for use with
+// apply.
+func DeploymentTriggerImageChangeParams() *DeploymentTriggerImageChangeParamsApplyConfiguration {
+	return &DeploymentTriggerImageChangeParamsApplyConfiguration{}
+}
+
+// WithAutomatic sets the Automatic field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Automatic field is set to the value of the last call.
+func (b *DeploymentTriggerImageChangeParamsApplyConfiguration) WithAutomatic(value bool) *DeploymentTriggerImageChangeParamsApplyConfiguration {
+	b.Automatic = &value
+	return b
+}
+
+// WithContainerNames adds the given value to the ContainerNames field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ContainerNames field.
+func (b *DeploymentTriggerImageChangeParamsApplyConfiguration) WithContainerNames(values ...string) *DeploymentTriggerImageChangeParamsApplyConfiguration {
+	for i := range values {
+		b.ContainerNames = append(b.ContainerNames, values[i])
+	}
+	return b
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *DeploymentTriggerImageChangeParamsApplyConfiguration) WithFrom(value corev1.ObjectReference) *DeploymentTriggerImageChangeParamsApplyConfiguration {
+	b.From = &value
+	return b
+}
+
+// WithLastTriggeredImage sets the LastTriggeredImage field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LastTriggeredImage field is set to the value of the last call.
+func (b *DeploymentTriggerImageChangeParamsApplyConfiguration) WithLastTriggeredImage(value string) *DeploymentTriggerImageChangeParamsApplyConfiguration { + b.LastTriggeredImage = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymenttriggerpolicy.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymenttriggerpolicy.go new file mode 100644 index 0000000000000..7e6dc5b5b601a --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/deploymenttriggerpolicy.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + appsv1 "github.com/openshift/api/apps/v1" +) + +// DeploymentTriggerPolicyApplyConfiguration represents a declarative configuration of the DeploymentTriggerPolicy type for use +// with apply. +type DeploymentTriggerPolicyApplyConfiguration struct { + Type *appsv1.DeploymentTriggerType `json:"type,omitempty"` + ImageChangeParams *DeploymentTriggerImageChangeParamsApplyConfiguration `json:"imageChangeParams,omitempty"` +} + +// DeploymentTriggerPolicyApplyConfiguration constructs a declarative configuration of the DeploymentTriggerPolicy type for use with +// apply. +func DeploymentTriggerPolicy() *DeploymentTriggerPolicyApplyConfiguration { + return &DeploymentTriggerPolicyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *DeploymentTriggerPolicyApplyConfiguration) WithType(value appsv1.DeploymentTriggerType) *DeploymentTriggerPolicyApplyConfiguration { + b.Type = &value + return b +} + +// WithImageChangeParams sets the ImageChangeParams field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageChangeParams field is set to the value of the last call. +func (b *DeploymentTriggerPolicyApplyConfiguration) WithImageChangeParams(value *DeploymentTriggerImageChangeParamsApplyConfiguration) *DeploymentTriggerPolicyApplyConfiguration { + b.ImageChangeParams = value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/execnewpodhook.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/execnewpodhook.go new file mode 100644 index 0000000000000..ae81c4ef53286 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/execnewpodhook.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ExecNewPodHookApplyConfiguration represents a declarative configuration of the ExecNewPodHook type for use +// with apply. +type ExecNewPodHookApplyConfiguration struct { + Command []string `json:"command,omitempty"` + Env []corev1.EnvVar `json:"env,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + Volumes []string `json:"volumes,omitempty"` +} + +// ExecNewPodHookApplyConfiguration constructs a declarative configuration of the ExecNewPodHook type for use with +// apply. 
+func ExecNewPodHook() *ExecNewPodHookApplyConfiguration {
+	return &ExecNewPodHookApplyConfiguration{}
+}
+
+// WithCommand adds the given value to the Command field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Command field.
+func (b *ExecNewPodHookApplyConfiguration) WithCommand(values ...string) *ExecNewPodHookApplyConfiguration {
+	for i := range values {
+		b.Command = append(b.Command, values[i])
+	}
+	return b
+}
+
+// WithEnv adds the given value to the Env field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Env field.
+func (b *ExecNewPodHookApplyConfiguration) WithEnv(values ...corev1.EnvVar) *ExecNewPodHookApplyConfiguration {
+	for i := range values {
+		b.Env = append(b.Env, values[i])
+	}
+	return b
+}
+
+// WithContainerName sets the ContainerName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ContainerName field is set to the value of the last call.
+func (b *ExecNewPodHookApplyConfiguration) WithContainerName(value string) *ExecNewPodHookApplyConfiguration {
+	b.ContainerName = &value
+	return b
+}
+
+// WithVolumes adds the given value to the Volumes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Volumes field.
+func (b *ExecNewPodHookApplyConfiguration) WithVolumes(values ...string) *ExecNewPodHookApplyConfiguration {
+	for i := range values {
+		b.Volumes = append(b.Volumes, values[i])
+	}
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/lifecyclehook.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/lifecyclehook.go new file mode 100644 index 0000000000000..637f064e48e70 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/lifecyclehook.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	appsv1 "github.com/openshift/api/apps/v1"
+)
+
+// LifecycleHookApplyConfiguration represents a declarative configuration of the LifecycleHook type for use
+// with apply.
+type LifecycleHookApplyConfiguration struct {
+	FailurePolicy *appsv1.LifecycleHookFailurePolicy `json:"failurePolicy,omitempty"`
+	ExecNewPod    *ExecNewPodHookApplyConfiguration  `json:"execNewPod,omitempty"`
+	TagImages     []TagImageHookApplyConfiguration   `json:"tagImages,omitempty"`
+}
+
+// LifecycleHookApplyConfiguration constructs a declarative configuration of the LifecycleHook type for use with
+// apply.
+func LifecycleHook() *LifecycleHookApplyConfiguration {
+	return &LifecycleHookApplyConfiguration{}
+}
+
+// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FailurePolicy field is set to the value of the last call.
+func (b *LifecycleHookApplyConfiguration) WithFailurePolicy(value appsv1.LifecycleHookFailurePolicy) *LifecycleHookApplyConfiguration {
+	b.FailurePolicy = &value
+	return b
+}
+
+// WithExecNewPod sets the ExecNewPod field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExecNewPod field is set to the value of the last call.
+func (b *LifecycleHookApplyConfiguration) WithExecNewPod(value *ExecNewPodHookApplyConfiguration) *LifecycleHookApplyConfiguration {
+	b.ExecNewPod = value
+	return b
+}
+
+// WithTagImages adds the given value to the TagImages field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the TagImages field.
+func (b *LifecycleHookApplyConfiguration) WithTagImages(values ...*TagImageHookApplyConfiguration) *LifecycleHookApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithTagImages")
+		}
+		b.TagImages = append(b.TagImages, *values[i])
+	}
+	return b
+} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/recreatedeploymentstrategyparams.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/recreatedeploymentstrategyparams.go new file mode 100644 index 0000000000000..4766c8bdd1830 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/recreatedeploymentstrategyparams.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// RecreateDeploymentStrategyParamsApplyConfiguration represents a declarative configuration of the RecreateDeploymentStrategyParams type for use
+// with apply.
+type RecreateDeploymentStrategyParamsApplyConfiguration struct {
+	TimeoutSeconds *int64                           `json:"timeoutSeconds,omitempty"`
+	Pre            *LifecycleHookApplyConfiguration `json:"pre,omitempty"`
+	Mid            *LifecycleHookApplyConfiguration `json:"mid,omitempty"`
+	Post           *LifecycleHookApplyConfiguration `json:"post,omitempty"`
+}
+
+// RecreateDeploymentStrategyParamsApplyConfiguration constructs a declarative configuration of the RecreateDeploymentStrategyParams type for use with
+// apply.
+func RecreateDeploymentStrategyParams() *RecreateDeploymentStrategyParamsApplyConfiguration {
+	return &RecreateDeploymentStrategyParamsApplyConfiguration{}
+}
+
+// WithTimeoutSeconds sets the TimeoutSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TimeoutSeconds field is set to the value of the last call.
+func (b *RecreateDeploymentStrategyParamsApplyConfiguration) WithTimeoutSeconds(value int64) *RecreateDeploymentStrategyParamsApplyConfiguration {
+	b.TimeoutSeconds = &value
+	return b
+}
+
+// WithPre sets the Pre field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Pre field is set to the value of the last call.
+func (b *RecreateDeploymentStrategyParamsApplyConfiguration) WithPre(value *LifecycleHookApplyConfiguration) *RecreateDeploymentStrategyParamsApplyConfiguration { + b.Pre = value + return b +} + +// WithMid sets the Mid field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mid field is set to the value of the last call. +func (b *RecreateDeploymentStrategyParamsApplyConfiguration) WithMid(value *LifecycleHookApplyConfiguration) *RecreateDeploymentStrategyParamsApplyConfiguration { + b.Mid = value + return b +} + +// WithPost sets the Post field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Post field is set to the value of the last call. +func (b *RecreateDeploymentStrategyParamsApplyConfiguration) WithPost(value *LifecycleHookApplyConfiguration) *RecreateDeploymentStrategyParamsApplyConfiguration { + b.Post = value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/rollingdeploymentstrategyparams.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/rollingdeploymentstrategyparams.go new file mode 100644 index 0000000000000..60158d11950c8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/rollingdeploymentstrategyparams.go @@ -0,0 +1,81 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RollingDeploymentStrategyParamsApplyConfiguration represents a declarative configuration of the RollingDeploymentStrategyParams type for use +// with apply. +type RollingDeploymentStrategyParamsApplyConfiguration struct { + UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty"` + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` + Pre *LifecycleHookApplyConfiguration `json:"pre,omitempty"` + Post *LifecycleHookApplyConfiguration `json:"post,omitempty"` +} + +// RollingDeploymentStrategyParamsApplyConfiguration constructs a declarative configuration of the RollingDeploymentStrategyParams type for use with +// apply. +func RollingDeploymentStrategyParams() *RollingDeploymentStrategyParamsApplyConfiguration { + return &RollingDeploymentStrategyParamsApplyConfiguration{} +} + +// WithUpdatePeriodSeconds sets the UpdatePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatePeriodSeconds field is set to the value of the last call. +func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithUpdatePeriodSeconds(value int64) *RollingDeploymentStrategyParamsApplyConfiguration { + b.UpdatePeriodSeconds = &value + return b +} + +// WithIntervalSeconds sets the IntervalSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntervalSeconds field is set to the value of the last call. 
+func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithIntervalSeconds(value int64) *RollingDeploymentStrategyParamsApplyConfiguration { + b.IntervalSeconds = &value + return b +} + +// WithTimeoutSeconds sets the TimeoutSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TimeoutSeconds field is set to the value of the last call. +func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithTimeoutSeconds(value int64) *RollingDeploymentStrategyParamsApplyConfiguration { + b.TimeoutSeconds = &value + return b +} + +// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxUnavailable field is set to the value of the last call. +func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingDeploymentStrategyParamsApplyConfiguration { + b.MaxUnavailable = &value + return b +} + +// WithMaxSurge sets the MaxSurge field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSurge field is set to the value of the last call. +func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithMaxSurge(value intstr.IntOrString) *RollingDeploymentStrategyParamsApplyConfiguration { + b.MaxSurge = &value + return b +} + +// WithPre sets the Pre field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pre field is set to the value of the last call. +func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithPre(value *LifecycleHookApplyConfiguration) *RollingDeploymentStrategyParamsApplyConfiguration { + b.Pre = value + return b +} + +// WithPost sets the Post field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Post field is set to the value of the last call. +func (b *RollingDeploymentStrategyParamsApplyConfiguration) WithPost(value *LifecycleHookApplyConfiguration) *RollingDeploymentStrategyParamsApplyConfiguration { + b.Post = value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/tagimagehook.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/tagimagehook.go new file mode 100644 index 0000000000000..6b88c2f5084c3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/apps/v1/tagimagehook.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// TagImageHookApplyConfiguration represents a declarative configuration of the TagImageHook type for use +// with apply. +type TagImageHookApplyConfiguration struct { + ContainerName *string `json:"containerName,omitempty"` + To *corev1.ObjectReference `json:"to,omitempty"` +} + +// TagImageHookApplyConfiguration constructs a declarative configuration of the TagImageHook type for use with +// apply. 
+func TagImageHook() *TagImageHookApplyConfiguration { + return &TagImageHookApplyConfiguration{} +} + +// WithContainerName sets the ContainerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerName field is set to the value of the last call. +func (b *TagImageHookApplyConfiguration) WithContainerName(value string) *TagImageHookApplyConfiguration { + b.ContainerName = &value + return b +} + +// WithTo sets the To field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the To field is set to the value of the last call. +func (b *TagImageHookApplyConfiguration) WithTo(value corev1.ObjectReference) *TagImageHookApplyConfiguration { + b.To = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/apps/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/apps/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..92598eb14328f --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/applyconfigurations/internal/internal.go @@ -0,0 +1,2603 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.apps.v1.CustomDeploymentStrategyParams + map: + fields: + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: environment + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: image + type: + scalar: string +- name: com.github.openshift.api.apps.v1.DeploymentCause + map: + fields: + - name: imageTrigger + type: + namedType: com.github.openshift.api.apps.v1.DeploymentCauseImageTrigger + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.apps.v1.DeploymentCauseImageTrigger + map: + fields: + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} +- name: com.github.openshift.api.apps.v1.DeploymentCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.apps.v1.DeploymentConfig + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.apps.v1.DeploymentConfigSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.apps.v1.DeploymentConfigStatus + default: {} +- name: 
com.github.openshift.api.apps.v1.DeploymentConfigSpec + map: + fields: + - name: minReadySeconds + type: + scalar: numeric + - name: paused + type: + scalar: boolean + - name: replicas + type: + scalar: numeric + default: 0 + - name: revisionHistoryLimit + type: + scalar: numeric + - name: selector + type: + map: + elementType: + scalar: string + - name: strategy + type: + namedType: com.github.openshift.api.apps.v1.DeploymentStrategy + default: {} + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + - name: test + type: + scalar: boolean + default: false + - name: triggers + type: + list: + elementType: + namedType: com.github.openshift.api.apps.v1.DeploymentTriggerPolicy + elementRelationship: atomic +- name: com.github.openshift.api.apps.v1.DeploymentConfigStatus + map: + fields: + - name: availableReplicas + type: + scalar: numeric + default: 0 + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.apps.v1.DeploymentCondition + elementRelationship: associative + keys: + - type + - name: details + type: + namedType: com.github.openshift.api.apps.v1.DeploymentDetails + - name: latestVersion + type: + scalar: numeric + default: 0 + - name: observedGeneration + type: + scalar: numeric + default: 0 + - name: readyReplicas + type: + scalar: numeric + - name: replicas + type: + scalar: numeric + default: 0 + - name: unavailableReplicas + type: + scalar: numeric + default: 0 + - name: updatedReplicas + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.apps.v1.DeploymentDetails + map: + fields: + - name: causes + type: + list: + elementType: + namedType: com.github.openshift.api.apps.v1.DeploymentCause + elementRelationship: atomic + - name: message + type: + scalar: string +- name: com.github.openshift.api.apps.v1.DeploymentStrategy + map: + fields: + - name: activeDeadlineSeconds + type: + scalar: numeric + - name: annotations + type: + map: + elementType: + scalar: string + - name: customParams + type: + namedType: com.github.openshift.api.apps.v1.CustomDeploymentStrategyParams + - name: labels + type: + map: + elementType: + scalar: string + - name: recreateParams + type: + namedType: com.github.openshift.api.apps.v1.RecreateDeploymentStrategyParams + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: rollingParams + type: + namedType: com.github.openshift.api.apps.v1.RollingDeploymentStrategyParams + - name: type + type: + scalar: string +- name: com.github.openshift.api.apps.v1.DeploymentTriggerImageChangeParams + map: + fields: + - name: automatic + type: + scalar: boolean + - name: containerNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: lastTriggeredImage + type: + scalar: string +- name: com.github.openshift.api.apps.v1.DeploymentTriggerPolicy + map: + fields: + - name: imageChangeParams + type: + namedType: com.github.openshift.api.apps.v1.DeploymentTriggerImageChangeParams + - name: type + type: + scalar: string +- name: com.github.openshift.api.apps.v1.ExecNewPodHook + map: + fields: + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: containerName + type: + scalar: string + default: "" + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: volumes + type: + list: + elementType: + scalar: string + 
elementRelationship: atomic +- name: com.github.openshift.api.apps.v1.LifecycleHook + map: + fields: + - name: execNewPod + type: + namedType: com.github.openshift.api.apps.v1.ExecNewPodHook + - name: failurePolicy + type: + scalar: string + default: "" + - name: tagImages + type: + list: + elementType: + namedType: com.github.openshift.api.apps.v1.TagImageHook + elementRelationship: atomic +- name: com.github.openshift.api.apps.v1.RecreateDeploymentStrategyParams + map: + fields: + - name: mid + type: + namedType: com.github.openshift.api.apps.v1.LifecycleHook + - name: post + type: + namedType: com.github.openshift.api.apps.v1.LifecycleHook + - name: pre + type: + namedType: com.github.openshift.api.apps.v1.LifecycleHook + - name: timeoutSeconds + type: + scalar: numeric +- name: com.github.openshift.api.apps.v1.RollingDeploymentStrategyParams + map: + fields: + - name: intervalSeconds + type: + scalar: numeric + - name: maxSurge + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: post + type: + namedType: com.github.openshift.api.apps.v1.LifecycleHook + - name: pre + type: + namedType: com.github.openshift.api.apps.v1.LifecycleHook + - name: timeoutSeconds + type: + scalar: numeric + - name: updatePeriodSeconds + type: + scalar: numeric +- name: com.github.openshift.api.apps.v1.TagImageHook + map: + fields: + - name: containerName + type: + scalar: string + default: "" + - name: to + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} +- name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: partition + type: + scalar: numeric + - name: readOnly + type: + scalar: boolean + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.Affinity + map: + fields: + - name: nodeAffinity + type: + namedType: io.k8s.api.core.v1.NodeAffinity + - name: podAffinity + type: + namedType: io.k8s.api.core.v1.PodAffinity + - name: podAntiAffinity + type: + namedType: io.k8s.api.core.v1.PodAntiAffinity +- name: io.k8s.api.core.v1.AppArmorProfile + map: + fields: + - name: localhostProfile + type: + scalar: string + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: localhostProfile + discriminatorValue: LocalhostProfile +- name: io.k8s.api.core.v1.AzureDiskVolumeSource + map: + fields: + - name: cachingMode + type: + scalar: string + default: ReadWrite + - name: diskName + type: + scalar: string + default: "" + - name: diskURI + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + default: ext4 + - name: kind + type: + scalar: string + default: Shared + - name: readOnly + type: + scalar: boolean + default: false +- name: io.k8s.api.core.v1.AzureFileVolumeSource + map: + fields: + - name: readOnly + type: + scalar: boolean + - name: secretName + type: + scalar: string + default: "" + - name: shareName + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.CSIVolumeSource + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: readOnly + type: + scalar: boolean + - name: volumeAttributes + type: + map: + elementType: + scalar: string +- name: io.k8s.api.core.v1.Capabilities + map: + fields: + - name: add + type: + list: + 
elementType: + scalar: string + elementRelationship: atomic + - name: drop + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.CephFSVolumeSource + map: + fields: + - name: monitors + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: path + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretFile + type: + scalar: string + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: user + type: + scalar: string +- name: io.k8s.api.core.v1.CinderVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ClusterTrustBundleProjection + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean + - name: path + type: + scalar: string + default: "" + - name: signerName + type: + scalar: string +- name: io.k8s.api.core.v1.ConfigMapEnvSource + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.ConfigMapKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.ConfigMapProjection + map: + fields: + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.ConfigMapVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.Container + map: + fields: + - name: args + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: associative + keys: + - name + - name: envFrom + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvFromSource + elementRelationship: atomic + - name: image + type: + scalar: string + - name: imagePullPolicy + type: + scalar: string + - name: lifecycle + type: + namedType: io.k8s.api.core.v1.Lifecycle + - name: livenessProbe + type: + namedType: io.k8s.api.core.v1.Probe + - name: name + type: + scalar: string + default: "" + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerPort + elementRelationship: associative + keys: + - containerPort + - protocol + - name: readinessProbe + type: + namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: restartPolicy + type: + scalar: string + - 
name: securityContext + type: + namedType: io.k8s.api.core.v1.SecurityContext + - name: startupProbe + type: + namedType: io.k8s.api.core.v1.Probe + - name: stdin + type: + scalar: boolean + - name: stdinOnce + type: + scalar: boolean + - name: terminationMessagePath + type: + scalar: string + - name: terminationMessagePolicy + type: + scalar: string + - name: tty + type: + scalar: boolean + - name: volumeDevices + type: + list: + elementType: + namedType: io.k8s.api.core.v1.VolumeDevice + elementRelationship: associative + keys: + - devicePath + - name: volumeMounts + type: + list: + elementType: + namedType: io.k8s.api.core.v1.VolumeMount + elementRelationship: associative + keys: + - mountPath + - name: workingDir + type: + scalar: string +- name: io.k8s.api.core.v1.ContainerPort + map: + fields: + - name: containerPort + type: + scalar: numeric + default: 0 + - name: hostIP + type: + scalar: string + - name: hostPort + type: + scalar: numeric + - name: name + type: + scalar: string + - name: protocol + type: + scalar: string + default: TCP +- name: io.k8s.api.core.v1.ContainerResizePolicy + map: + fields: + - name: resourceName + type: + scalar: string + default: "" + - name: restartPolicy + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.DownwardAPIProjection + map: + fields: + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.DownwardAPIVolumeFile + elementRelationship: atomic +- name: io.k8s.api.core.v1.DownwardAPIVolumeFile + map: + fields: + - name: fieldRef + type: + namedType: io.k8s.api.core.v1.ObjectFieldSelector + - name: mode + type: + scalar: numeric + - name: path + type: + scalar: string + default: "" + - name: resourceFieldRef + type: + namedType: io.k8s.api.core.v1.ResourceFieldSelector +- name: io.k8s.api.core.v1.DownwardAPIVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.DownwardAPIVolumeFile + elementRelationship: atomic +- name: io.k8s.api.core.v1.EmptyDirVolumeSource + map: + fields: + - name: medium + type: + scalar: string + - name: sizeLimit + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.EnvFromSource + map: + fields: + - name: configMapRef + type: + namedType: io.k8s.api.core.v1.ConfigMapEnvSource + - name: prefix + type: + scalar: string + - name: secretRef + type: + namedType: io.k8s.api.core.v1.SecretEnvSource +- name: io.k8s.api.core.v1.EnvVar + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string + - name: valueFrom + type: + namedType: io.k8s.api.core.v1.EnvVarSource +- name: io.k8s.api.core.v1.EnvVarSource + map: + fields: + - name: configMapKeyRef + type: + namedType: io.k8s.api.core.v1.ConfigMapKeySelector + - name: fieldRef + type: + namedType: io.k8s.api.core.v1.ObjectFieldSelector + - name: resourceFieldRef + type: + namedType: io.k8s.api.core.v1.ResourceFieldSelector + - name: secretKeyRef + type: + namedType: io.k8s.api.core.v1.SecretKeySelector +- name: io.k8s.api.core.v1.EphemeralContainer + map: + fields: + - name: args + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: associative + keys: + - name + - name: envFrom + type: + list: + elementType: + namedType: 
io.k8s.api.core.v1.EnvFromSource + elementRelationship: atomic + - name: image + type: + scalar: string + - name: imagePullPolicy + type: + scalar: string + - name: lifecycle + type: + namedType: io.k8s.api.core.v1.Lifecycle + - name: livenessProbe + type: + namedType: io.k8s.api.core.v1.Probe + - name: name + type: + scalar: string + default: "" + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerPort + elementRelationship: associative + keys: + - containerPort + - protocol + - name: readinessProbe + type: + namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: restartPolicy + type: + scalar: string + - name: securityContext + type: + namedType: io.k8s.api.core.v1.SecurityContext + - name: startupProbe + type: + namedType: io.k8s.api.core.v1.Probe + - name: stdin + type: + scalar: boolean + - name: stdinOnce + type: + scalar: boolean + - name: targetContainerName + type: + scalar: string + - name: terminationMessagePath + type: + scalar: string + - name: terminationMessagePolicy + type: + scalar: string + - name: tty + type: + scalar: boolean + - name: volumeDevices + type: + list: + elementType: + namedType: io.k8s.api.core.v1.VolumeDevice + elementRelationship: associative + keys: + - devicePath + - name: volumeMounts + type: + list: + elementType: + namedType: io.k8s.api.core.v1.VolumeMount + elementRelationship: associative + keys: + - mountPath + - name: workingDir + type: + scalar: string +- name: io.k8s.api.core.v1.EphemeralVolumeSource + map: + fields: + - name: volumeClaimTemplate + type: + namedType: io.k8s.api.core.v1.PersistentVolumeClaimTemplate +- name: io.k8s.api.core.v1.ExecAction + map: + fields: + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.FCVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: lun + type: + scalar: numeric + - name: readOnly + type: + scalar: boolean + - name: targetWWNs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: wwids + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.FlexVolumeSource + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: options + type: + map: + elementType: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference +- name: io.k8s.api.core.v1.FlockerVolumeSource + map: + fields: + - name: datasetName + type: + scalar: string + - name: datasetUUID + type: + scalar: string +- name: io.k8s.api.core.v1.GCEPersistentDiskVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: partition + type: + scalar: numeric + - name: pdName + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean +- name: io.k8s.api.core.v1.GRPCAction + map: + fields: + - name: port + type: + scalar: numeric + default: 0 + - name: service + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.GitRepoVolumeSource + map: + fields: + - name: directory + type: + scalar: string + - name: repository + type: + scalar: string + default: "" + - name: revision + type: + scalar: string +- name: 
io.k8s.api.core.v1.GlusterfsVolumeSource + map: + fields: + - name: endpoints + type: + scalar: string + default: "" + - name: path + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean +- name: io.k8s.api.core.v1.HTTPGetAction + map: + fields: + - name: host + type: + scalar: string + - name: httpHeaders + type: + list: + elementType: + namedType: io.k8s.api.core.v1.HTTPHeader + elementRelationship: atomic + - name: path + type: + scalar: string + - name: port + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: scheme + type: + scalar: string +- name: io.k8s.api.core.v1.HTTPHeader + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.HostAlias + map: + fields: + - name: hostnames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: ip + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.HostPathVolumeSource + map: + fields: + - name: path + type: + scalar: string + default: "" + - name: type + type: + scalar: string +- name: io.k8s.api.core.v1.ISCSIVolumeSource + map: + fields: + - name: chapAuthDiscovery + type: + scalar: boolean + - name: chapAuthSession + type: + scalar: boolean + - name: fsType + type: + scalar: string + - name: initiatorName + type: + scalar: string + - name: iqn + type: + scalar: string + default: "" + - name: iscsiInterface + type: + scalar: string + default: default + - name: lun + type: + scalar: numeric + default: 0 + - name: portals + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: targetPortal + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ImageVolumeSource + map: + fields: + - name: pullPolicy + type: + scalar: string + - name: reference + type: + scalar: string +- name: io.k8s.api.core.v1.KeyToPath + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: mode + type: + scalar: numeric + - name: path + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.Lifecycle + map: + fields: + - name: postStart + type: + namedType: io.k8s.api.core.v1.LifecycleHandler + - name: preStop + type: + namedType: io.k8s.api.core.v1.LifecycleHandler +- name: io.k8s.api.core.v1.LifecycleHandler + map: + fields: + - name: exec + type: + namedType: io.k8s.api.core.v1.ExecAction + - name: httpGet + type: + namedType: io.k8s.api.core.v1.HTTPGetAction + - name: sleep + type: + namedType: io.k8s.api.core.v1.SleepAction + - name: tcpSocket + type: + namedType: io.k8s.api.core.v1.TCPSocketAction +- name: io.k8s.api.core.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.NFSVolumeSource + map: + fields: + - name: path + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean + - name: server + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.NodeAffinity + map: + fields: + - name: preferredDuringSchedulingIgnoredDuringExecution + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PreferredSchedulingTerm + elementRelationship: atomic + - name: requiredDuringSchedulingIgnoredDuringExecution + type: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.core.v1.NodeSelector + map: + fields: + - name: 
nodeSelectorTerms + type: + list: + elementType: + namedType: io.k8s.api.core.v1.NodeSelectorTerm + elementRelationship: atomic + elementRelationship: atomic +- name: io.k8s.api.core.v1.NodeSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.NodeSelectorTerm + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.api.core.v1.NodeSelectorRequirement + elementRelationship: atomic + - name: matchFields + type: + list: + elementType: + namedType: io.k8s.api.core.v1.NodeSelectorRequirement + elementRelationship: atomic + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectFieldSelector + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.PersistentVolumeClaimSpec + map: + fields: + - name: accessModes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: dataSource + type: + namedType: io.k8s.api.core.v1.TypedLocalObjectReference + - name: dataSourceRef + type: + namedType: io.k8s.api.core.v1.TypedObjectReference + - name: resources + type: + namedType: io.k8s.api.core.v1.VolumeResourceRequirements + default: {} + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: storageClassName + type: + scalar: string + - name: volumeAttributesClassName + type: + scalar: string + - name: volumeMode + type: + scalar: string + - name: volumeName + type: + scalar: string +- name: io.k8s.api.core.v1.PersistentVolumeClaimTemplate + map: + fields: + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.core.v1.PersistentVolumeClaimSpec + default: {} +- name: io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource + map: + fields: + - name: claimName + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean +- name: io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: pdID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.PodAffinity + map: + fields: + - name: preferredDuringSchedulingIgnoredDuringExecution + type: + list: + elementType: + namedType: io.k8s.api.core.v1.WeightedPodAffinityTerm + elementRelationship: atomic + - name: requiredDuringSchedulingIgnoredDuringExecution + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodAffinityTerm + elementRelationship: atomic +- name: io.k8s.api.core.v1.PodAffinityTerm + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: mismatchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: 
namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: namespaces + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: topologyKey + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.PodAntiAffinity + map: + fields: + - name: preferredDuringSchedulingIgnoredDuringExecution + type: + list: + elementType: + namedType: io.k8s.api.core.v1.WeightedPodAffinityTerm + elementRelationship: atomic + - name: requiredDuringSchedulingIgnoredDuringExecution + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodAffinityTerm + elementRelationship: atomic +- name: io.k8s.api.core.v1.PodDNSConfig + map: + fields: + - name: nameservers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: options + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodDNSConfigOption + elementRelationship: atomic + - name: searches + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.PodDNSConfigOption + map: + fields: + - name: name + type: + scalar: string + - name: value + type: + scalar: string +- name: io.k8s.api.core.v1.PodOS + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.PodReadinessGate + map: + fields: + - name: conditionType + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.PodResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resourceClaimName + type: + scalar: string + - name: resourceClaimTemplateName + type: + scalar: string +- name: io.k8s.api.core.v1.PodSchedulingGate + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.PodSecurityContext + map: + fields: + - name: appArmorProfile + type: + namedType: io.k8s.api.core.v1.AppArmorProfile + - name: fsGroup + type: + scalar: numeric + - name: fsGroupChangePolicy + type: + scalar: string + - name: runAsGroup + type: + scalar: numeric + - name: runAsNonRoot + type: + scalar: boolean + - name: runAsUser + type: + scalar: numeric + - name: seLinuxChangePolicy + type: + scalar: string + - name: seLinuxOptions + type: + namedType: io.k8s.api.core.v1.SELinuxOptions + - name: seccompProfile + type: + namedType: io.k8s.api.core.v1.SeccompProfile + - name: supplementalGroups + type: + list: + elementType: + scalar: numeric + elementRelationship: atomic + - name: supplementalGroupsPolicy + type: + scalar: string + - name: sysctls + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Sysctl + elementRelationship: atomic + - name: windowsOptions + type: + namedType: io.k8s.api.core.v1.WindowsSecurityContextOptions +- name: io.k8s.api.core.v1.PodSpec + map: + fields: + - name: activeDeadlineSeconds + type: + scalar: numeric + - name: affinity + type: + namedType: io.k8s.api.core.v1.Affinity + - name: automountServiceAccountToken + type: + scalar: boolean + - name: containers + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Container + elementRelationship: associative + keys: + - name + - name: dnsConfig + type: + namedType: io.k8s.api.core.v1.PodDNSConfig + - name: dnsPolicy + type: + scalar: string + - name: enableServiceLinks + type: + scalar: boolean + - name: ephemeralContainers + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EphemeralContainer + elementRelationship: associative + keys: + - name + - name: hostAliases + type: + list: + elementType: + namedType: 
io.k8s.api.core.v1.HostAlias + elementRelationship: associative + keys: + - ip + - name: hostIPC + type: + scalar: boolean + - name: hostNetwork + type: + scalar: boolean + - name: hostPID + type: + scalar: boolean + - name: hostUsers + type: + scalar: boolean + - name: hostname + type: + scalar: string + - name: imagePullSecrets + type: + list: + elementType: + namedType: io.k8s.api.core.v1.LocalObjectReference + elementRelationship: associative + keys: + - name + - name: initContainers + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Container + elementRelationship: associative + keys: + - name + - name: nodeName + type: + scalar: string + - name: nodeSelector + type: + map: + elementType: + scalar: string + elementRelationship: atomic + - name: os + type: + namedType: io.k8s.api.core.v1.PodOS + - name: overhead + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: preemptionPolicy + type: + scalar: string + - name: priority + type: + scalar: numeric + - name: priorityClassName + type: + scalar: string + - name: readinessGates + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodReadinessGate + elementRelationship: atomic + - name: resourceClaims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodResourceClaim + elementRelationship: associative + keys: + - name + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + - name: restartPolicy + type: + scalar: string + - name: runtimeClassName + type: + scalar: string + - name: schedulerName + type: + scalar: string + - name: schedulingGates + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodSchedulingGate + elementRelationship: associative + keys: + - name + - name: securityContext + type: + namedType: io.k8s.api.core.v1.PodSecurityContext + - name: serviceAccount + type: + scalar: string + - name: serviceAccountName + type: + scalar: string + - name: setHostnameAsFQDN + type: + scalar: boolean + - name: shareProcessNamespace + type: + scalar: boolean + - name: subdomain + type: + scalar: string + - name: terminationGracePeriodSeconds + type: + scalar: numeric + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic + - name: topologySpreadConstraints + type: + list: + elementType: + namedType: io.k8s.api.core.v1.TopologySpreadConstraint + elementRelationship: associative + keys: + - topologyKey + - whenUnsatisfiable + - name: volumes + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Volume + elementRelationship: associative + keys: + - name +- name: io.k8s.api.core.v1.PodTemplateSpec + map: + fields: + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.core.v1.PodSpec + default: {} +- name: io.k8s.api.core.v1.PortworxVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.PreferredSchedulingTerm + map: + fields: + - name: preference + type: + namedType: io.k8s.api.core.v1.NodeSelectorTerm + default: {} + - name: weight + type: + scalar: numeric + default: 0 +- name: io.k8s.api.core.v1.Probe + map: + fields: + - name: exec + type: + namedType: io.k8s.api.core.v1.ExecAction + - name: failureThreshold + type: + scalar: numeric + - name: grpc + type: + namedType: io.k8s.api.core.v1.GRPCAction + - 
name: httpGet + type: + namedType: io.k8s.api.core.v1.HTTPGetAction + - name: initialDelaySeconds + type: + scalar: numeric + - name: periodSeconds + type: + scalar: numeric + - name: successThreshold + type: + scalar: numeric + - name: tcpSocket + type: + namedType: io.k8s.api.core.v1.TCPSocketAction + - name: terminationGracePeriodSeconds + type: + scalar: numeric + - name: timeoutSeconds + type: + scalar: numeric +- name: io.k8s.api.core.v1.ProjectedVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: sources + type: + list: + elementType: + namedType: io.k8s.api.core.v1.VolumeProjection + elementRelationship: atomic +- name: io.k8s.api.core.v1.QuobyteVolumeSource + map: + fields: + - name: group + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: registry + type: + scalar: string + default: "" + - name: tenant + type: + scalar: string + - name: user + type: + scalar: string + - name: volume + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.RBDVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: image + type: + scalar: string + default: "" + - name: keyring + type: + scalar: string + default: /etc/ceph/keyring + - name: monitors + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: pool + type: + scalar: string + default: rbd + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: user + type: + scalar: string + default: admin +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: request + type: + scalar: string +- name: io.k8s.api.core.v1.ResourceFieldSelector + map: + fields: + - name: containerName + type: + scalar: string + - name: divisor + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: resource + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceRequirements + map: + fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.SELinuxOptions + map: + fields: + - name: level + type: + scalar: string + - name: role + type: + scalar: string + - name: type + type: + scalar: string + - name: user + type: + scalar: string +- name: io.k8s.api.core.v1.ScaleIOVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + default: xfs + - name: gateway + type: + scalar: string + default: "" + - name: protectionDomain + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: sslEnabled + type: + scalar: boolean + - name: storageMode + type: + scalar: string + default: ThinProvisioned + - name: storagePool + type: + scalar: string + - name: system + type: + scalar: string + default: "" + - name: volumeName + type: + scalar: string +- name: io.k8s.api.core.v1.SeccompProfile + map: + fields: + - name: localhostProfile + type: + scalar: string + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: localhostProfile + 
discriminatorValue: LocalhostProfile +- name: io.k8s.api.core.v1.SecretEnvSource + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.SecretKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.SecretProjection + map: + fields: + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.SecretVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: optional + type: + scalar: boolean + - name: secretName + type: + scalar: string +- name: io.k8s.api.core.v1.SecurityContext + map: + fields: + - name: allowPrivilegeEscalation + type: + scalar: boolean + - name: appArmorProfile + type: + namedType: io.k8s.api.core.v1.AppArmorProfile + - name: capabilities + type: + namedType: io.k8s.api.core.v1.Capabilities + - name: privileged + type: + scalar: boolean + - name: procMount + type: + scalar: string + - name: readOnlyRootFilesystem + type: + scalar: boolean + - name: runAsGroup + type: + scalar: numeric + - name: runAsNonRoot + type: + scalar: boolean + - name: runAsUser + type: + scalar: numeric + - name: seLinuxOptions + type: + namedType: io.k8s.api.core.v1.SELinuxOptions + - name: seccompProfile + type: + namedType: io.k8s.api.core.v1.SeccompProfile + - name: windowsOptions + type: + namedType: io.k8s.api.core.v1.WindowsSecurityContextOptions +- name: io.k8s.api.core.v1.ServiceAccountTokenProjection + map: + fields: + - name: audience + type: + scalar: string + - name: expirationSeconds + type: + scalar: numeric + - name: path + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.SleepAction + map: + fields: + - name: seconds + type: + scalar: numeric + default: 0 +- name: io.k8s.api.core.v1.StorageOSVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: volumeName + type: + scalar: string + - name: volumeNamespace + type: + scalar: string +- name: io.k8s.api.core.v1.Sysctl + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.TCPSocketAction + map: + fields: + - name: host + type: + scalar: string + - name: port + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.core.v1.Toleration + map: + fields: + - name: effect + type: + scalar: string + - name: key + type: + scalar: string + - name: operator + type: + scalar: string + - name: tolerationSeconds + type: + scalar: numeric + - name: value + type: + scalar: string +- name: io.k8s.api.core.v1.TopologySpreadConstraint + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: maxSkew + type: + scalar: numeric + default: 0 + - name: minDomains + type: + scalar: numeric + - name: 
nodeAffinityPolicy + type: + scalar: string + - name: nodeTaintsPolicy + type: + scalar: string + - name: topologyKey + type: + scalar: string + default: "" + - name: whenUnsatisfiable + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.TypedLocalObjectReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.TypedObjectReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string +- name: io.k8s.api.core.v1.Volume + map: + fields: + - name: awsElasticBlockStore + type: + namedType: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource + - name: azureDisk + type: + namedType: io.k8s.api.core.v1.AzureDiskVolumeSource + - name: azureFile + type: + namedType: io.k8s.api.core.v1.AzureFileVolumeSource + - name: cephfs + type: + namedType: io.k8s.api.core.v1.CephFSVolumeSource + - name: cinder + type: + namedType: io.k8s.api.core.v1.CinderVolumeSource + - name: configMap + type: + namedType: io.k8s.api.core.v1.ConfigMapVolumeSource + - name: csi + type: + namedType: io.k8s.api.core.v1.CSIVolumeSource + - name: downwardAPI + type: + namedType: io.k8s.api.core.v1.DownwardAPIVolumeSource + - name: emptyDir + type: + namedType: io.k8s.api.core.v1.EmptyDirVolumeSource + - name: ephemeral + type: + namedType: io.k8s.api.core.v1.EphemeralVolumeSource + - name: fc + type: + namedType: io.k8s.api.core.v1.FCVolumeSource + - name: flexVolume + type: + namedType: io.k8s.api.core.v1.FlexVolumeSource + - name: flocker + type: + namedType: io.k8s.api.core.v1.FlockerVolumeSource + - name: gcePersistentDisk + type: + namedType: io.k8s.api.core.v1.GCEPersistentDiskVolumeSource + - name: gitRepo + type: + namedType: io.k8s.api.core.v1.GitRepoVolumeSource + - name: glusterfs + type: + namedType: io.k8s.api.core.v1.GlusterfsVolumeSource + - name: hostPath + type: + namedType: io.k8s.api.core.v1.HostPathVolumeSource + - name: image + type: + namedType: io.k8s.api.core.v1.ImageVolumeSource + - name: iscsi + type: + namedType: io.k8s.api.core.v1.ISCSIVolumeSource + - name: name + type: + scalar: string + default: "" + - name: nfs + type: + namedType: io.k8s.api.core.v1.NFSVolumeSource + - name: persistentVolumeClaim + type: + namedType: io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource + - name: photonPersistentDisk + type: + namedType: io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource + - name: portworxVolume + type: + namedType: io.k8s.api.core.v1.PortworxVolumeSource + - name: projected + type: + namedType: io.k8s.api.core.v1.ProjectedVolumeSource + - name: quobyte + type: + namedType: io.k8s.api.core.v1.QuobyteVolumeSource + - name: rbd + type: + namedType: io.k8s.api.core.v1.RBDVolumeSource + - name: scaleIO + type: + namedType: io.k8s.api.core.v1.ScaleIOVolumeSource + - name: secret + type: + namedType: io.k8s.api.core.v1.SecretVolumeSource + - name: storageos + type: + namedType: io.k8s.api.core.v1.StorageOSVolumeSource + - name: vsphereVolume + type: + namedType: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource +- name: io.k8s.api.core.v1.VolumeDevice + map: + fields: + - name: devicePath + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.VolumeMount + map: + fields: + - name: mountPath + 
type: + scalar: string + default: "" + - name: mountPropagation + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean + - name: recursiveReadOnly + type: + scalar: string + - name: subPath + type: + scalar: string + - name: subPathExpr + type: + scalar: string +- name: io.k8s.api.core.v1.VolumeProjection + map: + fields: + - name: clusterTrustBundle + type: + namedType: io.k8s.api.core.v1.ClusterTrustBundleProjection + - name: configMap + type: + namedType: io.k8s.api.core.v1.ConfigMapProjection + - name: downwardAPI + type: + namedType: io.k8s.api.core.v1.DownwardAPIProjection + - name: secret + type: + namedType: io.k8s.api.core.v1.SecretProjection + - name: serviceAccountToken + type: + namedType: io.k8s.api.core.v1.ServiceAccountTokenProjection +- name: io.k8s.api.core.v1.VolumeResourceRequirements + map: + fields: + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: storagePolicyID + type: + scalar: string + - name: storagePolicyName + type: + scalar: string + - name: volumePath + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.WeightedPodAffinityTerm + map: + fields: + - name: podAffinityTerm + type: + namedType: io.k8s.api.core.v1.PodAffinityTerm + default: {} + - name: weight + type: + scalar: numeric + default: 0 +- name: io.k8s.api.core.v1.WindowsSecurityContextOptions + map: + fields: + - name: gmsaCredentialSpec + type: + scalar: string + - name: gmsaCredentialSpecName + type: + scalar: string + - name: hostProcess + type: + scalar: boolean + - name: runAsUserName + type: + scalar: string +- name: io.k8s.apimachinery.pkg.api.resource.Quantity + scalar: untyped +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + elementRelationship: atomic + - name: matchLabels + type: + map: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.util.intstr.IntOrString + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..516edbee64ed7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + appsv1 "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AppsV1() appsv1.AppsV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + appsV1 *appsv1.AppsV1Client +} + +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.appsV1, err = appsv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.appsV1 = appsv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..014032ff9aefe --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + appsv1 "github.com/openshift/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + appsv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/apps_client.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/apps_client.go new file mode 100644 index 0000000000000..e368d6a64c4ba --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/apps_client.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + appsv1 "github.com/openshift/api/apps/v1" + scheme "github.com/openshift/client-go/apps/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1Interface interface { + RESTClient() rest.Interface + DeploymentConfigsGetter +} + +// AppsV1Client is used to interact with features provided by the apps.openshift.io group. +type AppsV1Client struct { + restClient rest.Interface +} + +func (c *AppsV1Client) DeploymentConfigs(namespace string) DeploymentConfigInterface { + return newDeploymentConfigs(c, namespace) +} + +// NewForConfig creates a new AppsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AppsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1Client for the given RESTClient. 
+func New(c rest.Interface) *AppsV1Client { + return &AppsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := appsv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/deploymentconfig.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/deploymentconfig.go new file mode 100644 index 0000000000000..a7e3bb7dfee13 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/deploymentconfig.go @@ -0,0 +1,123 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + appsv1 "github.com/openshift/api/apps/v1" + applyconfigurationsappsv1 "github.com/openshift/client-go/apps/applyconfigurations/apps/v1" + scheme "github.com/openshift/client-go/apps/clientset/versioned/scheme" + v1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// DeploymentConfigsGetter has a method to return a DeploymentConfigInterface. +// A group's client should implement this interface. +type DeploymentConfigsGetter interface { + DeploymentConfigs(namespace string) DeploymentConfigInterface +} + +// DeploymentConfigInterface has methods to work with DeploymentConfig resources. +type DeploymentConfigInterface interface { + Create(ctx context.Context, deploymentConfig *appsv1.DeploymentConfig, opts metav1.CreateOptions) (*appsv1.DeploymentConfig, error) + Update(ctx context.Context, deploymentConfig *appsv1.DeploymentConfig, opts metav1.UpdateOptions) (*appsv1.DeploymentConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, deploymentConfig *appsv1.DeploymentConfig, opts metav1.UpdateOptions) (*appsv1.DeploymentConfig, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.DeploymentConfig, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DeploymentConfigList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.DeploymentConfig, err error) + Apply(ctx context.Context, deploymentConfig *applyconfigurationsappsv1.DeploymentConfigApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DeploymentConfig, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, deploymentConfig *applyconfigurationsappsv1.DeploymentConfigApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DeploymentConfig, err error) + Instantiate(ctx context.Context, deploymentConfigName string, deploymentRequest *appsv1.DeploymentRequest, opts metav1.CreateOptions) (*appsv1.DeploymentConfig, error) + Rollback(ctx context.Context, deploymentConfigName string, deploymentConfigRollback *appsv1.DeploymentConfigRollback, opts metav1.CreateOptions) (*appsv1.DeploymentConfig, error) + GetScale(ctx context.Context, deploymentConfigName string, options metav1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(ctx context.Context, deploymentConfigName string, scale *v1beta1.Scale, opts metav1.UpdateOptions) (*v1beta1.Scale, error) + + DeploymentConfigExpansion +} + +// deploymentConfigs implements DeploymentConfigInterface +type deploymentConfigs struct { + *gentype.ClientWithListAndApply[*appsv1.DeploymentConfig, *appsv1.DeploymentConfigList, *applyconfigurationsappsv1.DeploymentConfigApplyConfiguration] +} + +// newDeploymentConfigs returns a DeploymentConfigs +func newDeploymentConfigs(c *AppsV1Client, namespace string) *deploymentConfigs { + return &deploymentConfigs{ + gentype.NewClientWithListAndApply[*appsv1.DeploymentConfig, *appsv1.DeploymentConfigList, *applyconfigurationsappsv1.DeploymentConfigApplyConfiguration]( + "deploymentconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1.DeploymentConfig { return &appsv1.DeploymentConfig{} }, + func() *appsv1.DeploymentConfigList { return &appsv1.DeploymentConfigList{} }, + ), + } +} + +// Instantiate takes the representation of a deploymentRequest and creates it. Returns the server's representation of the deploymentConfig, and an error, if there is any. +func (c *deploymentConfigs) Instantiate(ctx context.Context, deploymentConfigName string, deploymentRequest *appsv1.DeploymentRequest, opts metav1.CreateOptions) (result *appsv1.DeploymentConfig, err error) { + result = &appsv1.DeploymentConfig{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("deploymentconfigs"). + Name(deploymentConfigName). + SubResource("instantiate"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(deploymentRequest). + Do(ctx). + Into(result) + return +} + +// Rollback takes the representation of a deploymentConfigRollback and creates it. Returns the server's representation of the deploymentConfig, and an error, if there is any. +func (c *deploymentConfigs) Rollback(ctx context.Context, deploymentConfigName string, deploymentConfigRollback *appsv1.DeploymentConfigRollback, opts metav1.CreateOptions) (result *appsv1.DeploymentConfig, err error) { + result = &appsv1.DeploymentConfig{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("deploymentconfigs"). + Name(deploymentConfigName). + SubResource("rollback"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(deploymentConfigRollback). + Do(ctx). + Into(result) + return +} + +// GetScale takes name of the deploymentConfig, and returns the corresponding v1beta1.Scale object, and an error if there is any. +func (c *deploymentConfigs) GetScale(ctx context.Context, deploymentConfigName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). + Resource("deploymentconfigs"). + Name(deploymentConfigName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). 
+ Do(ctx). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deploymentConfigs) UpdateScale(ctx context.Context, deploymentConfigName string, scale *v1beta1.Scale, opts metav1.UpdateOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). + Resource("deploymentconfigs"). + Name(deploymentConfigName). + SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scale). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/doc.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/generated_expansion.go new file mode 100644 index 0000000000000..0545f8c09a154 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type DeploymentConfigExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/interface.go b/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/interface.go new file mode 100644 index 0000000000000..a10023dd51756 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package apps + +import ( + v1 "github.com/openshift/client-go/apps/informers/externalversions/apps/v1" + internalinterfaces "github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/v1/deploymentconfig.go b/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/v1/deploymentconfig.go new file mode 100644 index 0000000000000..b741a6320f096 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/v1/deploymentconfig.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + time "time" + + apiappsv1 "github.com/openshift/api/apps/v1" + versioned "github.com/openshift/client-go/apps/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces" + appsv1 "github.com/openshift/client-go/apps/listers/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DeploymentConfigInformer provides access to a shared informer and lister for +// DeploymentConfigs. +type DeploymentConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() appsv1.DeploymentConfigLister +} + +type deploymentConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentConfigInformer constructs a new informer for DeploymentConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentConfigInformer constructs a new informer for DeploymentConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DeploymentConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &apiappsv1.DeploymentConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiappsv1.DeploymentConfig{}, f.defaultInformer) +} + +func (f *deploymentConfigInformer) Lister() appsv1.DeploymentConfigLister { + return appsv1.NewDeploymentConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/v1/interface.go b/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/v1/interface.go new file mode 100644 index 0000000000000..e95a869d2f6f0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/informers/externalversions/apps/v1/interface.go @@ -0,0 +1,29 @@ +// Code 
generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DeploymentConfigs returns a DeploymentConfigInformer. + DeploymentConfigs() DeploymentConfigInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DeploymentConfigs returns a DeploymentConfigInformer. +func (v *version) DeploymentConfigs() DeploymentConfigInformer { + return &deploymentConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/apps/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/apps/informers/externalversions/factory.go new file mode 100644 index 0000000000000..6eb7324817809 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/apps/clientset/versioned" + apps "github.com/openshift/client-go/apps/informers/externalversions/apps" + internalinterfaces "github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. 
+	f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+	Apps() apps.Interface
+}
+
+func (f *sharedInformerFactory) Apps() apps.Interface {
+	return apps.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/apps/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/apps/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..c88774584f69e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apps/informers/externalversions/generic.go
@@ -0,0 +1,46 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/apps/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer that will locate and delegate to other
+// sharedInformers based on type
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=apps.openshift.io, Version=v1
+	case v1.SchemeGroupVersion.WithResource("deploymentconfigs"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DeploymentConfigs().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000000..308cc551b44fb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,24 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/openshift/client-go/apps/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
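Reviewer note: the factory above mirrors client-go's shared informer factory. A lifecycle sketch matching the corrected doc comment; the kubeconfig path, resync period, and namespace are illustrative.

	package main

	import (
		"context"
		"fmt"
		"os"
		"time"

		appsclient "github.com/openshift/client-go/apps/clientset/versioned"
		appsinformers "github.com/openshift/client-go/apps/informers/externalversions"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
		if err != nil {
			panic(err)
		}
		client := appsclient.NewForConfigOrDie(cfg)

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		factory := appsinformers.NewSharedInformerFactoryWithOptions(
			client, 10*time.Minute, appsinformers.WithNamespace("myproject"))
		defer factory.Shutdown() // returns immediately if nothing was started

		// Requesting an informer registers it with the factory; Start then runs it.
		dcInformer := factory.Apps().V1().DeploymentConfigs().Informer()

		factory.Start(ctx.Done())
		for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
			if !ok {
				fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
				return
			}
		}
		fmt.Println("cached objects:", len(dcInformer.GetStore().List()))
	}
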
+type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/apps/listers/apps/v1/deploymentconfig.go b/vendor/github.com/openshift/client-go/apps/listers/apps/v1/deploymentconfig.go new file mode 100644 index 0000000000000..074682e441a31 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/listers/apps/v1/deploymentconfig.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + appsv1 "github.com/openshift/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// DeploymentConfigLister helps list DeploymentConfigs. +// All objects returned here must be treated as read-only. +type DeploymentConfigLister interface { + // List lists all DeploymentConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*appsv1.DeploymentConfig, err error) + // DeploymentConfigs returns an object that can list and get DeploymentConfigs. + DeploymentConfigs(namespace string) DeploymentConfigNamespaceLister + DeploymentConfigListerExpansion +} + +// deploymentConfigLister implements the DeploymentConfigLister interface. +type deploymentConfigLister struct { + listers.ResourceIndexer[*appsv1.DeploymentConfig] +} + +// NewDeploymentConfigLister returns a new DeploymentConfigLister. +func NewDeploymentConfigLister(indexer cache.Indexer) DeploymentConfigLister { + return &deploymentConfigLister{listers.New[*appsv1.DeploymentConfig](indexer, appsv1.Resource("deploymentconfig"))} +} + +// DeploymentConfigs returns an object that can list and get DeploymentConfigs. +func (s *deploymentConfigLister) DeploymentConfigs(namespace string) DeploymentConfigNamespaceLister { + return deploymentConfigNamespaceLister{listers.NewNamespaced[*appsv1.DeploymentConfig](s.ResourceIndexer, namespace)} +} + +// DeploymentConfigNamespaceLister helps list and get DeploymentConfigs. +// All objects returned here must be treated as read-only. +type DeploymentConfigNamespaceLister interface { + // List lists all DeploymentConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*appsv1.DeploymentConfig, err error) + // Get retrieves the DeploymentConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*appsv1.DeploymentConfig, error) + DeploymentConfigNamespaceListerExpansion +} + +// deploymentConfigNamespaceLister implements the DeploymentConfigNamespaceLister +// interface. +type deploymentConfigNamespaceLister struct { + listers.ResourceIndexer[*appsv1.DeploymentConfig] +} diff --git a/vendor/github.com/openshift/client-go/apps/listers/apps/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/apps/listers/apps/v1/expansion_generated.go new file mode 100644 index 0000000000000..8a7cce0eb3316 --- /dev/null +++ b/vendor/github.com/openshift/client-go/apps/listers/apps/v1/expansion_generated.go @@ -0,0 +1,11 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// DeploymentConfigListerExpansion allows custom methods to be added to +// DeploymentConfigLister. +type DeploymentConfigListerExpansion interface{} + +// DeploymentConfigNamespaceListerExpansion allows custom methods to be added to +// DeploymentConfigNamespaceLister. 
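Reviewer note: the lister generated below serves reads from an informer's local index rather than the API server. A sketch of typical consumption, assuming the informer factory from the previous example has synced; the namespace and object name are illustrative.

	package main

	import (
		"fmt"

		appslisters "github.com/openshift/client-go/apps/listers/apps/v1"
		apierrors "k8s.io/apimachinery/pkg/api/errors"
		"k8s.io/apimachinery/pkg/labels"
	)

	// printDeploymentConfigs reads from the informer-backed cache through the
	// generated lister. Everything returned is shared cache state: read-only.
	func printDeploymentConfigs(lister appslisters.DeploymentConfigLister) error {
		all, err := lister.List(labels.Everything())
		if err != nil {
			return err
		}
		fmt.Println("cached DeploymentConfigs:", len(all))

		dc, err := lister.DeploymentConfigs("myproject").Get("frontend")
		if apierrors.IsNotFound(err) {
			fmt.Println("frontend is not in the cache")
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println("latest version:", dc.Status.LatestVersion)
		return nil
	}
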
+type DeploymentConfigNamespaceListerExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/clusterrole.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/clusterrole.go
new file mode 100644
index 0000000000000..e09f47e9a7c86
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/clusterrole.go
@@ -0,0 +1,252 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	authorizationv1 "github.com/openshift/api/authorization/v1"
+	internal "github.com/openshift/client-go/authorization/applyconfigurations/internal"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
+// with apply.
+type ClusterRoleApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Rules                                []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+	AggregationRule                      *rbacv1.AggregationRule        `json:"aggregationRule,omitempty"`
+}
+
+// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
+// apply.
+func ClusterRole(name string) *ClusterRoleApplyConfiguration {
+	b := &ClusterRoleApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ClusterRole")
+	b.WithAPIVersion("authorization.openshift.io/v1")
+	return b
+}
+
+// ExtractClusterRole extracts the applied configuration owned by fieldManager from
+// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
+// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRole must be an unmodified ClusterRole API object that was retrieved from the Kubernetes API.
+// ExtractClusterRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterRole(clusterRole *authorizationv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+	return extractClusterRole(clusterRole, fieldManager, "")
+}
+
+// ExtractClusterRoleStatus is the same as ExtractClusterRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleStatus(clusterRole *authorizationv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { + return extractClusterRole(clusterRole, fieldManager, "status") +} + +func extractClusterRole(clusterRole *authorizationv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { + b := &ClusterRoleApplyConfiguration{} + err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("com.github.openshift.api.authorization.v1.ClusterRole"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterRole.Name) + + b.WithKind("ClusterRole") + b.WithAPIVersion("authorization.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
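Reviewer note: the Extract functions above support the extract/modify-in-place/apply workflow their doc comments mention. A sketch of that loop follows; the Apply call is abstracted behind a local interface because the exact generated typed client is defined elsewhere in this diff, and the field-manager name is arbitrary.

	package main

	import (
		"context"

		authorizationv1 "github.com/openshift/api/authorization/v1"
		applyauthv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// clusterRoleApplier stands in for the generated typed client's Apply
	// method (an assumption; client-gen emits Apply when apply
	// configurations are present).
	type clusterRoleApplier interface {
		Apply(ctx context.Context, ac *applyauthv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (*authorizationv1.ClusterRole, error)
	}

	const fieldManager = "my-controller" // arbitrary manager name

	// addPodReadRule re-applies only the fields fieldManager owns, plus one new rule.
	func addPodReadRule(ctx context.Context, client clusterRoleApplier, live *authorizationv1.ClusterRole) error {
		// Recover this manager's slice of the live object.
		ac, err := applyauthv1.ExtractClusterRole(live, fieldManager)
		if err != nil {
			return err
		}
		// Modify in place; fields owned by other managers stay untouched.
		ac.WithRules(applyauthv1.PolicyRule().
			WithVerbs("get", "list").
			WithAPIGroups("").
			WithResources("pods"))
		_, err = client.Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
		return err
	}
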
+func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field that have the same key.
+func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field that have the same key.
+func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithRules adds the given value to the Rules field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Rules field.
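Reviewer note: two conventions in the setters above are easy to miss: scalar fields are overwritten by the last call, while map entries are merged and slice values appended. A small illustration, with arbitrary names:

	package main

	import (
		"fmt"

		applyauthv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
	)

	func main() {
		cr := applyauthv1.ClusterRole("example").
			WithLabels(map[string]string{"team": "a"}).
			WithLabels(map[string]string{"tier": "infra"}). // merged with the first call
			WithAnnotations(map[string]string{"note": "v1"}).
			WithAnnotations(map[string]string{"note": "v2"}) // same key: last call wins

		// Labels and Annotations are promoted from the embedded
		// *ObjectMetaApplyConfiguration.
		fmt.Println(cr.Labels, cr.Annotations)
	}
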
+func (b *ClusterRoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfiguration) *ClusterRoleApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithRules")
+		}
+		b.Rules = append(b.Rules, *values[i])
+	}
+	return b
+}
+
+// WithAggregationRule sets the AggregationRule field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AggregationRule field is set to the value of the last call.
+func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value rbacv1.AggregationRule) *ClusterRoleApplyConfiguration {
+	b.AggregationRule = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ClusterRoleApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/clusterrolebinding.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/clusterrolebinding.go
new file mode 100644
index 0000000000000..428e90b0455be
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/clusterrolebinding.go
@@ -0,0 +1,267 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	authorizationv1 "github.com/openshift/api/authorization/v1"
+	internal "github.com/openshift/client-go/authorization/applyconfigurations/internal"
+	corev1 "k8s.io/api/core/v1"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
+// with apply.
+type ClusterRoleBindingApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	UserNames                            *authorizationv1.OptionalNames `json:"userNames,omitempty"`
+	GroupNames                           *authorizationv1.OptionalNames `json:"groupNames,omitempty"`
+	Subjects                             []corev1.ObjectReference       `json:"subjects,omitempty"`
+	RoleRef                              *corev1.ObjectReference        `json:"roleRef,omitempty"`
+}
+
+// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
+// apply.
+func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
+	b := &ClusterRoleBindingApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ClusterRoleBinding")
+	b.WithAPIVersion("authorization.openshift.io/v1")
+	return b
+}
+
+// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
+// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRoleBinding must be an unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractClusterRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterRoleBinding(clusterRoleBinding *authorizationv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+	return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
+}
+
+// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *authorizationv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+	return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
+}
+
+func extractClusterRoleBinding(clusterRoleBinding *authorizationv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
+	b := &ClusterRoleBindingApplyConfiguration{}
+	err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("com.github.openshift.api.authorization.v1.ClusterRoleBinding"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(clusterRoleBinding.Name)
+
+	b.WithKind("ClusterRoleBinding")
+	b.WithAPIVersion("authorization.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field that have the same key.
+func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field that have the same key.
+func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithUserNames sets the UserNames field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserNames field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithUserNames(value authorizationv1.OptionalNames) *ClusterRoleBindingApplyConfiguration {
+	b.UserNames = &value
+	return b
+}
+
+// WithGroupNames sets the GroupNames field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GroupNames field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithGroupNames(value authorizationv1.OptionalNames) *ClusterRoleBindingApplyConfiguration {
+	b.GroupNames = &value
+	return b
+}
+
+// WithSubjects adds the given value to the Subjects field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Subjects field.
+func (b *ClusterRoleBindingApplyConfiguration) WithSubjects(values ...corev1.ObjectReference) *ClusterRoleBindingApplyConfiguration {
+	for i := range values {
+		b.Subjects = append(b.Subjects, values[i])
+	}
+	return b
+}
+
+// WithRoleRef sets the RoleRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RoleRef field is set to the value of the last call.
+func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value corev1.ObjectReference) *ClusterRoleBindingApplyConfiguration {
+	b.RoleRef = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/grouprestriction.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/grouprestriction.go
new file mode 100644
index 0000000000000..3bb799f3948e1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/grouprestriction.go
@@ -0,0 +1,43 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// GroupRestrictionApplyConfiguration represents a declarative configuration of the GroupRestriction type for use
+// with apply.
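Reviewer note: unlike the rbac.authorization.k8s.io types, this legacy binding references its subjects and role through plain corev1.ObjectReference values, as the struct above shows. A construction sketch with illustrative names:

	package main

	import (
		"fmt"

		applyauthv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
		corev1 "k8s.io/api/core/v1"
	)

	func main() {
		crb := applyauthv1.ClusterRoleBinding("deployers-edit").
			WithSubjects(
				corev1.ObjectReference{Kind: "User", Name: "alice"},
				corev1.ObjectReference{Kind: "Group", Name: "deployers"},
			).
			WithRoleRef(corev1.ObjectReference{Kind: "ClusterRole", Name: "edit"})

		fmt.Println(*crb.GetName(), len(crb.Subjects))
	}
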
+type GroupRestrictionApplyConfiguration struct { + Groups []string `json:"groups,omitempty"` + Selectors []metav1.LabelSelectorApplyConfiguration `json:"labels,omitempty"` +} + +// GroupRestrictionApplyConfiguration constructs a declarative configuration of the GroupRestriction type for use with +// apply. +func GroupRestriction() *GroupRestrictionApplyConfiguration { + return &GroupRestrictionApplyConfiguration{} +} + +// WithGroups adds the given value to the Groups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Groups field. +func (b *GroupRestrictionApplyConfiguration) WithGroups(values ...string) *GroupRestrictionApplyConfiguration { + for i := range values { + b.Groups = append(b.Groups, values[i]) + } + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *GroupRestrictionApplyConfiguration) WithSelectors(values ...*metav1.LabelSelectorApplyConfiguration) *GroupRestrictionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/policyrule.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/policyrule.go new file mode 100644 index 0000000000000..16765a14ee1a5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/policyrule.go @@ -0,0 +1,82 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use +// with apply. +type PolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + AttributeRestrictions *runtime.RawExtension `json:"attributeRestrictions,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + Resources []string `json:"resources,omitempty"` + ResourceNames []string `json:"resourceNames,omitempty"` + NonResourceURLsSlice []string `json:"nonResourceURLs,omitempty"` +} + +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with +// apply. +func PolicyRule() *PolicyRuleApplyConfiguration { + return &PolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *PolicyRuleApplyConfiguration) WithVerbs(values ...string) *PolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithAttributeRestrictions sets the AttributeRestrictions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the AttributeRestrictions field is set to the value of the last call. +func (b *PolicyRuleApplyConfiguration) WithAttributeRestrictions(value runtime.RawExtension) *PolicyRuleApplyConfiguration { + b.AttributeRestrictions = &value + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *PolicyRuleApplyConfiguration) WithAPIGroups(values ...string) *PolicyRuleApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *PolicyRuleApplyConfiguration) WithResources(values ...string) *PolicyRuleApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *PolicyRuleApplyConfiguration) WithResourceNames(values ...string) *PolicyRuleApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b +} + +// WithNonResourceURLsSlice adds the given value to the NonResourceURLsSlice field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceURLsSlice field. +func (b *PolicyRuleApplyConfiguration) WithNonResourceURLsSlice(values ...string) *PolicyRuleApplyConfiguration { + for i := range values { + b.NonResourceURLsSlice = append(b.NonResourceURLsSlice, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/role.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/role.go new file mode 100644 index 0000000000000..57f9f78144caa --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/role.go @@ -0,0 +1,244 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + internal "github.com/openshift/client-go/authorization/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RoleApplyConfiguration represents a declarative configuration of the Role type for use +// with apply. 
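The PolicyRule and GroupRestriction builders shown in the two files above compose in the same chained style. A short sketch using only those builders; the group names and labels are placeholders, and the metav1 alias is client-go's meta/v1 apply-configuration package, as in the generated imports.

package example

import (
	applyauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// readOnlyPodsRule grants read-only verbs on pods in the core ("") API group.
func readOnlyPodsRule() *applyauthorizationv1.PolicyRuleApplyConfiguration {
	return applyauthorizationv1.PolicyRule().
		WithVerbs("get", "list", "watch").
		WithAPIGroups("").
		WithResources("pods", "pods/log")
}

// trustedGroups matches two literal groups plus any group labeled tier=trusted;
// the label selector builder comes from client-go's meta/v1 apply configurations.
func trustedGroups() *applyauthorizationv1.GroupRestrictionApplyConfiguration {
	return applyauthorizationv1.GroupRestriction().
		WithGroups("ops", "release-managers").
		WithSelectors(metav1.LabelSelector().
			WithMatchLabels(map[string]string{"tier": "trusted"}))
}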
+type RoleApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` +} + +// Role constructs a declarative configuration of the Role type for use with +// apply. +func Role(name, namespace string) *RoleApplyConfiguration { + b := &RoleApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Role") + b.WithAPIVersion("authorization.openshift.io/v1") + return b +} + +// ExtractRole extracts the applied configuration owned by fieldManager from +// role. If no managedFields are found in role for fieldManager, a +// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// role must be an unmodified Role API object that was retrieved from the Kubernetes API. +// ExtractRole provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractRole(role *authorizationv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "") +} + +// ExtractRoleStatus is the same as ExtractRole except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleStatus(role *authorizationv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { + return extractRole(role, fieldManager, "status") +} + +func extractRole(role *authorizationv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { + b := &RoleApplyConfiguration{} + err := managedfields.ExtractInto(role, internal.Parser().Type("com.github.openshift.api.authorization.v1.Role"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(role.Name) + b.WithNamespace(role.Namespace) + + b.WithKind("Role") + b.WithAPIVersion("authorization.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call.
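A sketch of the extract/modify-in-place/apply round trip that ExtractRole describes, assuming the generated clientset exposes Get and Apply for namespaced Roles; the role name, rule, and field manager are placeholders (WithRules appears further down in this file).

package example

import (
	"context"

	applyauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
	versioned "github.com/openshift/client-go/authorization/clientset/versioned"
	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func addBuildRule(ctx context.Context, client versioned.Interface, ns string) error {
	// Fetch the live object; ExtractRole requires an unmodified API object.
	live, err := client.AuthorizationV1().Roles(ns).Get(ctx, "builder", apismetav1.GetOptions{})
	if err != nil {
		return err
	}
	// Recover only the fields currently owned by this field manager.
	cfg, err := applyauthorizationv1.ExtractRole(live, "example-manager")
	if err != nil {
		return err
	}
	// Modify in place, then apply; fields owned by other managers are left untouched.
	cfg.WithRules(applyauthorizationv1.PolicyRule().WithVerbs("get").WithResources("builds"))
	_, err = client.AuthorizationV1().Roles(ns).
		Apply(ctx, cfg, apismetav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}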
+func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *RoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfiguration) *RoleApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebinding.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebinding.go new file mode 100644 index 0000000000000..adc5ec78b96f1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebinding.go @@ -0,0 +1,269 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + internal "github.com/openshift/client-go/authorization/applyconfigurations/internal" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use +// with apply. +type RoleBindingApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + UserNames *authorizationv1.OptionalNames `json:"userNames,omitempty"` + GroupNames *authorizationv1.OptionalNames `json:"groupNames,omitempty"` + Subjects []corev1.ObjectReference `json:"subjects,omitempty"` + RoleRef *corev1.ObjectReference `json:"roleRef,omitempty"` +} + +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with +// apply. +func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { + b := &RoleBindingApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("RoleBinding") + b.WithAPIVersion("authorization.openshift.io/v1") + return b +} + +// ExtractRoleBinding extracts the applied configuration owned by fieldManager from +// roleBinding. If no managedFields are found in roleBinding for fieldManager, a +// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// roleBinding must be an unmodified RoleBinding API object that was retrieved from the Kubernetes API. +// ExtractRoleBinding provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractRoleBinding(roleBinding *authorizationv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "") +} + +// ExtractRoleBindingStatus is the same as ExtractRoleBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRoleBindingStatus(roleBinding *authorizationv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { + return extractRoleBinding(roleBinding, fieldManager, "status") +} + +func extractRoleBinding(roleBinding *authorizationv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { + b := &RoleBindingApplyConfiguration{} + err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("com.github.openshift.api.authorization.v1.RoleBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(roleBinding.Name) + b.WithNamespace(roleBinding.Namespace) + + b.WithKind("RoleBinding") + b.WithAPIVersion("authorization.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithUserNames sets the UserNames field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserNames field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithUserNames(value authorizationv1.OptionalNames) *RoleBindingApplyConfiguration { + b.UserNames = &value + return b +} + +// WithGroupNames sets the GroupNames field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GroupNames field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithGroupNames(value authorizationv1.OptionalNames) *RoleBindingApplyConfiguration { + b.GroupNames = &value + return b +} + +// WithSubjects adds the given value to the Subjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subjects field. +func (b *RoleBindingApplyConfiguration) WithSubjects(values ...corev1.ObjectReference) *RoleBindingApplyConfiguration { + for i := range values { + b.Subjects = append(b.Subjects, values[i]) + } + return b +} + +// WithRoleRef sets the RoleRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoleRef field is set to the value of the last call. +func (b *RoleBindingApplyConfiguration) WithRoleRef(value corev1.ObjectReference) *RoleBindingApplyConfiguration { + b.RoleRef = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebindingrestriction.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebindingrestriction.go new file mode 100644 index 0000000000000..0fbec3a153f72 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebindingrestriction.go @@ -0,0 +1,239 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
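For the namespaced RoleBinding builder just completed, a brief sketch; all names are placeholders, and the constructor takes a name and namespace as defined above.

package example

import (
	applyauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
	corev1 "k8s.io/api/core/v1"
)

// viewerBinding binds a cluster role to one service account in a namespace.
// RoleRef in this API is a plain core ObjectReference, per the struct above.
func viewerBinding(ns string) *applyauthorizationv1.RoleBindingApplyConfiguration {
	return applyauthorizationv1.RoleBinding("viewers", ns).
		WithSubjects(corev1.ObjectReference{
			Kind:      "ServiceAccount",
			Name:      "metrics-reader",
			Namespace: ns,
		}).
		WithRoleRef(corev1.ObjectReference{Kind: "ClusterRole", Name: "view"})
}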
+ +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + internal "github.com/openshift/client-go/authorization/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RoleBindingRestrictionApplyConfiguration represents a declarative configuration of the RoleBindingRestriction type for use +// with apply. +type RoleBindingRestrictionApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *RoleBindingRestrictionSpecApplyConfiguration `json:"spec,omitempty"` +} + +// RoleBindingRestriction constructs a declarative configuration of the RoleBindingRestriction type for use with +// apply. +func RoleBindingRestriction(name, namespace string) *RoleBindingRestrictionApplyConfiguration { + b := &RoleBindingRestrictionApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("RoleBindingRestriction") + b.WithAPIVersion("authorization.openshift.io/v1") + return b +} + +// ExtractRoleBindingRestriction extracts the applied configuration owned by fieldManager from +// roleBindingRestriction. If no managedFields are found in roleBindingRestriction for fieldManager, a +// RoleBindingRestrictionApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// roleBindingRestriction must be an unmodified RoleBindingRestriction API object that was retrieved from the Kubernetes API. +// ExtractRoleBindingRestriction provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental!
+func ExtractRoleBindingRestrictionStatus(roleBindingRestriction *authorizationv1.RoleBindingRestriction, fieldManager string) (*RoleBindingRestrictionApplyConfiguration, error) { + return extractRoleBindingRestriction(roleBindingRestriction, fieldManager, "status") +} + +func extractRoleBindingRestriction(roleBindingRestriction *authorizationv1.RoleBindingRestriction, fieldManager string, subresource string) (*RoleBindingRestrictionApplyConfiguration, error) { + b := &RoleBindingRestrictionApplyConfiguration{} + err := managedfields.ExtractInto(roleBindingRestriction, internal.Parser().Type("com.github.openshift.api.authorization.v1.RoleBindingRestriction"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(roleBindingRestriction.Name) + b.WithNamespace(roleBindingRestriction.Namespace) + + b.WithKind("RoleBindingRestriction") + b.WithAPIVersion("authorization.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithKind(value string) *RoleBindingRestrictionApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithAPIVersion(value string) *RoleBindingRestrictionApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithName(value string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithGenerateName(value string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *RoleBindingRestrictionApplyConfiguration) WithNamespace(value string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithUID(value types.UID) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithResourceVersion(value string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithGeneration(value int64) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *RoleBindingRestrictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *RoleBindingRestrictionApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *RoleBindingRestrictionApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *RoleBindingRestrictionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *RoleBindingRestrictionApplyConfiguration) WithFinalizers(values ...string) *RoleBindingRestrictionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *RoleBindingRestrictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *RoleBindingRestrictionApplyConfiguration) WithSpec(value *RoleBindingRestrictionSpecApplyConfiguration) *RoleBindingRestrictionApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingRestrictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebindingrestrictionspec.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebindingrestrictionspec.go new file mode 100644 index 0000000000000..9482ebcce6951 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/rolebindingrestrictionspec.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RoleBindingRestrictionSpecApplyConfiguration represents a declarative configuration of the RoleBindingRestrictionSpec type for use +// with apply. +type RoleBindingRestrictionSpecApplyConfiguration struct { + UserRestriction *UserRestrictionApplyConfiguration `json:"userrestriction,omitempty"` + GroupRestriction *GroupRestrictionApplyConfiguration `json:"grouprestriction,omitempty"` + ServiceAccountRestriction *ServiceAccountRestrictionApplyConfiguration `json:"serviceaccountrestriction,omitempty"` +} + +// RoleBindingRestrictionSpecApplyConfiguration constructs a declarative configuration of the RoleBindingRestrictionSpec type for use with +// apply. +func RoleBindingRestrictionSpec() *RoleBindingRestrictionSpecApplyConfiguration { + return &RoleBindingRestrictionSpecApplyConfiguration{} +} + +// WithUserRestriction sets the UserRestriction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserRestriction field is set to the value of the last call. +func (b *RoleBindingRestrictionSpecApplyConfiguration) WithUserRestriction(value *UserRestrictionApplyConfiguration) *RoleBindingRestrictionSpecApplyConfiguration { + b.UserRestriction = value + return b +} + +// WithGroupRestriction sets the GroupRestriction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GroupRestriction field is set to the value of the last call. 
+func (b *RoleBindingRestrictionSpecApplyConfiguration) WithGroupRestriction(value *GroupRestrictionApplyConfiguration) *RoleBindingRestrictionSpecApplyConfiguration { + b.GroupRestriction = value + return b +} + +// WithServiceAccountRestriction sets the ServiceAccountRestriction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccountRestriction field is set to the value of the last call. +func (b *RoleBindingRestrictionSpecApplyConfiguration) WithServiceAccountRestriction(value *ServiceAccountRestrictionApplyConfiguration) *RoleBindingRestrictionSpecApplyConfiguration { + b.ServiceAccountRestriction = value + return b +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/serviceaccountreference.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/serviceaccountreference.go new file mode 100644 index 0000000000000..9f22961ff487a --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/serviceaccountreference.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServiceAccountReferenceApplyConfiguration represents a declarative configuration of the ServiceAccountReference type for use +// with apply. +type ServiceAccountReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// ServiceAccountReferenceApplyConfiguration constructs a declarative configuration of the ServiceAccountReference type for use with +// apply. +func ServiceAccountReference() *ServiceAccountReferenceApplyConfiguration { + return &ServiceAccountReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceAccountReferenceApplyConfiguration) WithName(value string) *ServiceAccountReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceAccountReferenceApplyConfiguration) WithNamespace(value string) *ServiceAccountReferenceApplyConfiguration { + b.Namespace = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/serviceaccountrestriction.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/serviceaccountrestriction.go new file mode 100644 index 0000000000000..9d0f1fcf9609a --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/serviceaccountrestriction.go @@ -0,0 +1,39 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServiceAccountRestrictionApplyConfiguration represents a declarative configuration of the ServiceAccountRestriction type for use +// with apply. 
+type ServiceAccountRestrictionApplyConfiguration struct { + ServiceAccounts []ServiceAccountReferenceApplyConfiguration `json:"serviceaccounts,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` +} + +// ServiceAccountRestrictionApplyConfiguration constructs a declarative configuration of the ServiceAccountRestriction type for use with +// apply. +func ServiceAccountRestriction() *ServiceAccountRestrictionApplyConfiguration { + return &ServiceAccountRestrictionApplyConfiguration{} +} + +// WithServiceAccounts adds the given value to the ServiceAccounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ServiceAccounts field. +func (b *ServiceAccountRestrictionApplyConfiguration) WithServiceAccounts(values ...*ServiceAccountReferenceApplyConfiguration) *ServiceAccountRestrictionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithServiceAccounts") + } + b.ServiceAccounts = append(b.ServiceAccounts, *values[i]) + } + return b +} + +// WithNamespaces adds the given value to the Namespaces field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Namespaces field. +func (b *ServiceAccountRestrictionApplyConfiguration) WithNamespaces(values ...string) *ServiceAccountRestrictionApplyConfiguration { + for i := range values { + b.Namespaces = append(b.Namespaces, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/userrestriction.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/userrestriction.go new file mode 100644 index 0000000000000..dc931b60ed82d --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1/userrestriction.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// UserRestrictionApplyConfiguration represents a declarative configuration of the UserRestriction type for use +// with apply. +type UserRestrictionApplyConfiguration struct { + Users []string `json:"users,omitempty"` + Groups []string `json:"groups,omitempty"` + Selectors []metav1.LabelSelectorApplyConfiguration `json:"labels,omitempty"` +} + +// UserRestrictionApplyConfiguration constructs a declarative configuration of the UserRestriction type for use with +// apply. +func UserRestriction() *UserRestrictionApplyConfiguration { + return &UserRestrictionApplyConfiguration{} +} + +// WithUsers adds the given value to the Users field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Users field. +func (b *UserRestrictionApplyConfiguration) WithUsers(values ...string) *UserRestrictionApplyConfiguration { + for i := range values { + b.Users = append(b.Users, values[i]) + } + return b +} + +// WithGroups adds the given value to the Groups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
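Tying the restriction builders together: a sketch of a RoleBindingRestriction whose spec limits bindings in a project to one service account. All names are placeholders; WithUserRestriction and WithGroupRestriction (defined earlier) slot into the same spec position.

package example

import (
	applyauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1"
)

// saOnlyRestriction permits role bindings in project-x only for the named
// service account; the Namespaces list additionally allows any service
// account in those namespaces.
func saOnlyRestriction() *applyauthorizationv1.RoleBindingRestrictionApplyConfiguration {
	return applyauthorizationv1.RoleBindingRestriction("sa-only", "project-x").
		WithSpec(applyauthorizationv1.RoleBindingRestrictionSpec().
			WithServiceAccountRestriction(applyauthorizationv1.ServiceAccountRestriction().
				WithServiceAccounts(applyauthorizationv1.ServiceAccountReference().
					WithName("builder").
					WithNamespace("project-x")).
				WithNamespaces("project-x")))
}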
+// If called multiple times, values provided by each call will be appended to the Groups field. +func (b *UserRestrictionApplyConfiguration) WithGroups(values ...string) *UserRestrictionApplyConfiguration { + for i := range values { + b.Groups = append(b.Groups, values[i]) + } + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *UserRestrictionApplyConfiguration) WithSelectors(values ...*metav1.LabelSelectorApplyConfiguration) *UserRestrictionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..22313b3cf5678 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/internal/internal.go @@ -0,0 +1,489 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.authorization.v1.ClusterRole + map: + fields: + - name: aggregationRule + type: + namedType: io.k8s.api.rbac.v1.AggregationRule + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: rules + type: + list: + elementType: + namedType: com.github.openshift.api.authorization.v1.PolicyRule + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.ClusterRoleBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: groupNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: roleRef + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: subjects + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic + - name: userNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.GroupRestriction + map: + fields: + - name: groups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: labels + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.PolicyRule + map: + fields: + - name: apiGroups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: attributeRestrictions + type: + namedType: __untyped_atomic_ + - name: 
nonResourceURLs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourceNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resources + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: verbs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.Role + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: rules + type: + list: + elementType: + namedType: com.github.openshift.api.authorization.v1.PolicyRule + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.RoleBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: groupNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: roleRef + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: subjects + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic + - name: userNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.RoleBindingRestriction + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.authorization.v1.RoleBindingRestrictionSpec + default: {} +- name: com.github.openshift.api.authorization.v1.RoleBindingRestrictionSpec + map: + fields: + - name: grouprestriction + type: + namedType: com.github.openshift.api.authorization.v1.GroupRestriction + - name: serviceaccountrestriction + type: + namedType: com.github.openshift.api.authorization.v1.ServiceAccountRestriction + - name: userrestriction + type: + namedType: com.github.openshift.api.authorization.v1.UserRestriction +- name: com.github.openshift.api.authorization.v1.ServiceAccountReference + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" +- name: com.github.openshift.api.authorization.v1.ServiceAccountRestriction + map: + fields: + - name: namespaces + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: serviceaccounts + type: + list: + elementType: + namedType: com.github.openshift.api.authorization.v1.ServiceAccountReference + elementRelationship: atomic +- name: com.github.openshift.api.authorization.v1.UserRestriction + map: + fields: + - name: groups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: labels + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic + - name: users + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: 
string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.rbac.v1.AggregationRule + map: + fields: + - name: clusterRoleSelectors + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + elementRelationship: atomic + - name: matchLabels + type: + map: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.runtime.RawExtension + map: + elementType: + scalar: untyped + list: + elementType: + namedType: 
__untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/authorization/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/utils.go new file mode 100644 index 0000000000000..c918f2b64b67b --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/applyconfigurations/utils.go @@ -0,0 +1,48 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package applyconfigurations + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + internal "github.com/openshift/client-go/authorization/applyconfigurations/internal" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. +func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=authorization.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("ClusterRole"): + return &authorizationv1.ClusterRoleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &authorizationv1.ClusterRoleBindingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GroupRestriction"): + return &authorizationv1.GroupRestrictionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PolicyRule"): + return &authorizationv1.PolicyRuleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Role"): + return &authorizationv1.RoleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RoleBinding"): + return &authorizationv1.RoleBindingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RoleBindingRestriction"): + return &authorizationv1.RoleBindingRestrictionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RoleBindingRestrictionSpec"): + return &authorizationv1.RoleBindingRestrictionSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ServiceAccountReference"): + return &authorizationv1.ServiceAccountReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ServiceAccountRestriction"): + return &authorizationv1.ServiceAccountRestrictionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("UserRestriction"): + return &authorizationv1.UserRestrictionApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..795638be4f57a --- /dev/null +++ 
b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AuthorizationV1() authorizationv1.AuthorizationV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + authorizationV1 *authorizationv1.AuthorizationV1Client +} + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + return c.authorizationV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.authorizationV1, err = authorizationv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
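+
+// Illustrative sketch, not generator output: constructing this clientset
+// out-of-cluster. The kubeconfig path is invented; clientcmd is the standard
+// k8s.io/client-go/tools/clientcmd helper:
+//
+//	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
+//	if err != nil {
+//		// handle error
+//	}
+//	cs, err := versioned.NewForConfig(cfg) // discovery and authorizationV1 share one transport
+//	if err != nil {
+//		// handle error
+//	}
+//	reviews := cs.AuthorizationV1().SubjectAccessReviews()
+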
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.authorizationV1 = authorizationv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000000..aaafcfeab98db --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + applyconfigurations "github.com/openshift/client-go/authorization/applyconfigurations" + clientset "github.com/openshift/client-go/authorization/clientset/versioned" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + fakeauthorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
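+
+// Illustrative sketch, not generator output: seeding the fake clientset in a
+// unit test. The Role name, namespace, and ctx are invented; authorizationv1
+// here means github.com/openshift/api/authorization/v1:
+//
+//	seed := &authorizationv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "reader", Namespace: "demo"}}
+//	cs := fake.NewSimpleClientset(seed)
+//	role, err := cs.AuthorizationV1().Roles("demo").Get(ctx, "reader", metav1.GetOptions{})
+//	// role echoes the seeded object; the tracker applies no defaulting or validation.
+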
+func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000000..3630ed1cd17db --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/register.go new file mode 100644 index 0000000000000..12ab6c8546f14 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + authorizationv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..226bf053438e1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + authorizationv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/authorization_client.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/authorization_client.go new file mode 100644 index 0000000000000..a91d6f7252a78 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/authorization_client.go @@ -0,0 +1,141 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + http "net/http" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + LocalResourceAccessReviewsGetter + LocalSubjectAccessReviewsGetter + ResourceAccessReviewsGetter + RolesGetter + RoleBindingsGetter + RoleBindingRestrictionsGetter + SelfSubjectRulesReviewsGetter + SubjectAccessReviewsGetter + SubjectRulesReviewsGetter +} + +// AuthorizationV1Client is used to interact with features provided by the authorization.openshift.io group. +type AuthorizationV1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *AuthorizationV1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *AuthorizationV1Client) LocalResourceAccessReviews(namespace string) LocalResourceAccessReviewInterface { + return newLocalResourceAccessReviews(c, namespace) +} + +func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1Client) ResourceAccessReviews() ResourceAccessReviewInterface { + return newResourceAccessReviews(c) +} + +func (c *AuthorizationV1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *AuthorizationV1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +func (c *AuthorizationV1Client) RoleBindingRestrictions(namespace string) RoleBindingRestrictionInterface { + return newRoleBindingRestrictions(c, namespace) +} + +func (c *AuthorizationV1Client) SelfSubjectRulesReviews(namespace string) SelfSubjectRulesReviewInterface { + return newSelfSubjectRulesReviews(c, namespace) +} + +func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +func (c *AuthorizationV1Client) SubjectRulesReviews(namespace string) SubjectRulesReviewInterface { + return newSubjectRulesReviews(c, namespace) +} + +// NewForConfig creates a new AuthorizationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthorizationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AuthorizationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and +// panics if there is an error in the config. 
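+
+// Illustrative sketch, not generator output: several hand-built group clients
+// can share one HTTP client so they reuse a single transport. cfg and ctx are
+// assumed to exist; metav1 is k8s.io/apimachinery/pkg/apis/meta/v1:
+//
+//	httpClient, err := rest.HTTPClientFor(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	authz, err := NewForConfigAndClient(cfg, httpClient)
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = authz.ClusterRoles().List(ctx, metav1.ListOptions{})
+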
+func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1Client { + return &AuthorizationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := authorizationv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/clusterrole.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/clusterrole.go new file mode 100644 index 0000000000000..f863b1242d993 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/clusterrole.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + applyconfigurationsauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. 
+type ClusterRoleInterface interface { + Create(ctx context.Context, clusterRole *authorizationv1.ClusterRole, opts metav1.CreateOptions) (*authorizationv1.ClusterRole, error) + Update(ctx context.Context, clusterRole *authorizationv1.ClusterRole, opts metav1.UpdateOptions) (*authorizationv1.ClusterRole, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*authorizationv1.ClusterRole, error) + List(ctx context.Context, opts metav1.ListOptions) (*authorizationv1.ClusterRoleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *authorizationv1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsauthorizationv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *authorizationv1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + *gentype.ClientWithListAndApply[*authorizationv1.ClusterRole, *authorizationv1.ClusterRoleList, *applyconfigurationsauthorizationv1.ClusterRoleApplyConfiguration] +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *AuthorizationV1Client) *clusterRoles { + return &clusterRoles{ + gentype.NewClientWithListAndApply[*authorizationv1.ClusterRole, *authorizationv1.ClusterRoleList, *applyconfigurationsauthorizationv1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.ClusterRole { return &authorizationv1.ClusterRole{} }, + func() *authorizationv1.ClusterRoleList { return &authorizationv1.ClusterRoleList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/clusterrolebinding.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/clusterrolebinding.go new file mode 100644 index 0000000000000..773c28745c7f7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/clusterrolebinding.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + applyconfigurationsauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
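+
+// Illustrative sketch, not generator output: server-side apply through the
+// generated Apply method. The object name and field manager are invented, and
+// the sketch assumes the apply-configuration constructor takes the object name,
+// as is typical for cluster-scoped kinds:
+//
+//	cr := applyconfigurationsauthorizationv1.ClusterRole("example-aggregator")
+//	applied, err := client.ClusterRoles().Apply(ctx, cr,
+//		metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
+//	// applied reflects the server's merged view after the apply.
+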
+type ClusterRoleBindingInterface interface { + Create(ctx context.Context, clusterRoleBinding *authorizationv1.ClusterRoleBinding, opts metav1.CreateOptions) (*authorizationv1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *authorizationv1.ClusterRoleBinding, opts metav1.UpdateOptions) (*authorizationv1.ClusterRoleBinding, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*authorizationv1.ClusterRoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*authorizationv1.ClusterRoleBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *authorizationv1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsauthorizationv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *authorizationv1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + *gentype.ClientWithListAndApply[*authorizationv1.ClusterRoleBinding, *authorizationv1.ClusterRoleBindingList, *applyconfigurationsauthorizationv1.ClusterRoleBindingApplyConfiguration] +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *AuthorizationV1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + gentype.NewClientWithListAndApply[*authorizationv1.ClusterRoleBinding, *authorizationv1.ClusterRoleBindingList, *applyconfigurationsauthorizationv1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.ClusterRoleBinding { return &authorizationv1.ClusterRoleBinding{} }, + func() *authorizationv1.ClusterRoleBindingList { return &authorizationv1.ClusterRoleBindingList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/doc.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/doc.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/doc.go new file mode 100644 index 0000000000000..2b5ba4c8e4422 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_authorization_client.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_authorization_client.go new file mode 100644 index 0000000000000..df76413f82701 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_authorization_client.go @@ -0,0 +1,64 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuthorizationV1 struct { + *testing.Fake +} + +func (c *FakeAuthorizationV1) ClusterRoles() v1.ClusterRoleInterface { + return newFakeClusterRoles(c) +} + +func (c *FakeAuthorizationV1) ClusterRoleBindings() v1.ClusterRoleBindingInterface { + return newFakeClusterRoleBindings(c) +} + +func (c *FakeAuthorizationV1) LocalResourceAccessReviews(namespace string) v1.LocalResourceAccessReviewInterface { + return newFakeLocalResourceAccessReviews(c, namespace) +} + +func (c *FakeAuthorizationV1) LocalSubjectAccessReviews(namespace string) v1.LocalSubjectAccessReviewInterface { + return newFakeLocalSubjectAccessReviews(c, namespace) +} + +func (c *FakeAuthorizationV1) ResourceAccessReviews() v1.ResourceAccessReviewInterface { + return newFakeResourceAccessReviews(c) +} + +func (c *FakeAuthorizationV1) Roles(namespace string) v1.RoleInterface { + return newFakeRoles(c, namespace) +} + +func (c *FakeAuthorizationV1) RoleBindings(namespace string) v1.RoleBindingInterface { + return newFakeRoleBindings(c, namespace) +} + +func (c *FakeAuthorizationV1) RoleBindingRestrictions(namespace string) v1.RoleBindingRestrictionInterface { + return newFakeRoleBindingRestrictions(c, namespace) +} + +func (c *FakeAuthorizationV1) SelfSubjectRulesReviews(namespace string) v1.SelfSubjectRulesReviewInterface { + return newFakeSelfSubjectRulesReviews(c, namespace) +} + +func (c *FakeAuthorizationV1) SubjectAccessReviews() v1.SubjectAccessReviewInterface { + return newFakeSubjectAccessReviews(c) +} + +func (c *FakeAuthorizationV1) SubjectRulesReviews(namespace string) v1.SubjectRulesReviewInterface { + return newFakeSubjectRulesReviews(c, namespace) +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuthorizationV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_clusterrole.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_clusterrole.go new file mode 100644 index 0000000000000..de34657d6509b --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_clusterrole.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + typedauthorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeClusterRoles implements ClusterRoleInterface +type fakeClusterRoles struct { + *gentype.FakeClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *authorizationv1.ClusterRoleApplyConfiguration] + Fake *FakeAuthorizationV1 +} + +func newFakeClusterRoles(fake *FakeAuthorizationV1) typedauthorizationv1.ClusterRoleInterface { + return &fakeClusterRoles{ + gentype.NewFakeClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *authorizationv1.ClusterRoleApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("clusterroles"), + v1.SchemeGroupVersion.WithKind("ClusterRole"), + func() *v1.ClusterRole { return &v1.ClusterRole{} }, + func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }, + func(dst, src *v1.ClusterRoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ClusterRoleList) []*v1.ClusterRole { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ClusterRoleList, items []*v1.ClusterRole) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_clusterrolebinding.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_clusterrolebinding.go new file mode 100644 index 0000000000000..92346e547398c --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_clusterrolebinding.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + typedauthorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeClusterRoleBindings implements ClusterRoleBindingInterface +type fakeClusterRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *authorizationv1.ClusterRoleBindingApplyConfiguration] + Fake *FakeAuthorizationV1 +} + +func newFakeClusterRoleBindings(fake *FakeAuthorizationV1) typedauthorizationv1.ClusterRoleBindingInterface { + return &fakeClusterRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *authorizationv1.ClusterRoleBindingApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("clusterrolebindings"), + v1.SchemeGroupVersion.WithKind("ClusterRoleBinding"), + func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} }, + func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }, + func(dst, src *v1.ClusterRoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ClusterRoleBindingList) []*v1.ClusterRoleBinding { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ClusterRoleBindingList, items []*v1.ClusterRoleBinding) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_localresourceaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_localresourceaccessreview.go new file mode 100644 index 0000000000000..ee442655f4850 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_localresourceaccessreview.go @@ -0,0 +1,44 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" + testing "k8s.io/client-go/testing" +) + +// fakeLocalResourceAccessReviews implements LocalResourceAccessReviewInterface +type fakeLocalResourceAccessReviews struct { + *gentype.FakeClient[*v1.LocalResourceAccessReview] + Fake *FakeAuthorizationV1 +} + +func newFakeLocalResourceAccessReviews(fake *FakeAuthorizationV1, namespace string) authorizationv1.LocalResourceAccessReviewInterface { + return &fakeLocalResourceAccessReviews{ + gentype.NewFakeClient[*v1.LocalResourceAccessReview]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("localresourceaccessreviews"), + v1.SchemeGroupVersion.WithKind("LocalResourceAccessReview"), + func() *v1.LocalResourceAccessReview { return &v1.LocalResourceAccessReview{} }, + ), + fake, + } +} + +// Create takes the representation of a localResourceAccessReview and creates it. Returns the server's representation of the resourceAccessReviewResponse, and an error, if there is any. 
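+
+// Illustrative sketch, not generator output: review-style resources are
+// create-only and answer with a different type, here ResourceAccessReviewResponse.
+// The action fields, response field names, and ctx are assumptions from the
+// OpenShift authorization API:
+//
+//	rar := &v1.LocalResourceAccessReview{Action: v1.Action{Verb: "get", Resource: "pods"}}
+//	resp, err := cs.AuthorizationV1().LocalResourceAccessReviews("demo").Create(ctx, rar, metav1.CreateOptions{})
+//	// resp.UsersSlice and resp.GroupsSlice would list who can perform the action.
+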
+func (c *fakeLocalResourceAccessReviews) Create(ctx context.Context, localResourceAccessReview *v1.LocalResourceAccessReview, opts metav1.CreateOptions) (result *v1.ResourceAccessReviewResponse, err error) { + emptyResult := &v1.ResourceAccessReviewResponse{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(c.Resource(), c.Namespace(), localResourceAccessReview, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ResourceAccessReviewResponse), err +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_localsubjectaccessreview.go new file mode 100644 index 0000000000000..62b0d73829f88 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -0,0 +1,44 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" + testing "k8s.io/client-go/testing" +) + +// fakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type fakeLocalSubjectAccessReviews struct { + *gentype.FakeClient[*v1.LocalSubjectAccessReview] + Fake *FakeAuthorizationV1 +} + +func newFakeLocalSubjectAccessReviews(fake *FakeAuthorizationV1, namespace string) authorizationv1.LocalSubjectAccessReviewInterface { + return &fakeLocalSubjectAccessReviews{ + gentype.NewFakeClient[*v1.LocalSubjectAccessReview]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), + v1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview"), + func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }, + ), + fake, + } +} + +// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the subjectAccessReviewResponse, and an error, if there is any. +func (c *fakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReviewResponse, err error) { + emptyResult := &v1.SubjectAccessReviewResponse{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(c.Resource(), c.Namespace(), localSubjectAccessReview, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.SubjectAccessReviewResponse), err +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_resourceaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_resourceaccessreview.go new file mode 100644 index 0000000000000..e34368a235df1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_resourceaccessreview.go @@ -0,0 +1,43 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + context "context" + + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" + testing "k8s.io/client-go/testing" +) + +// fakeResourceAccessReviews implements ResourceAccessReviewInterface +type fakeResourceAccessReviews struct { + *gentype.FakeClient[*v1.ResourceAccessReview] + Fake *FakeAuthorizationV1 +} + +func newFakeResourceAccessReviews(fake *FakeAuthorizationV1) authorizationv1.ResourceAccessReviewInterface { + return &fakeResourceAccessReviews{ + gentype.NewFakeClient[*v1.ResourceAccessReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("resourceaccessreviews"), + v1.SchemeGroupVersion.WithKind("ResourceAccessReview"), + func() *v1.ResourceAccessReview { return &v1.ResourceAccessReview{} }, + ), + fake, + } +} + +// Create takes the representation of a resourceAccessReview and creates it. Returns the server's representation of the resourceAccessReviewResponse, and an error, if there is any. +func (c *fakeResourceAccessReviews) Create(ctx context.Context, resourceAccessReview *v1.ResourceAccessReview, opts metav1.CreateOptions) (result *v1.ResourceAccessReviewResponse, err error) { + emptyResult := &v1.ResourceAccessReviewResponse{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(c.Resource(), resourceAccessReview, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ResourceAccessReviewResponse), err +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_role.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_role.go new file mode 100644 index 0000000000000..7e534f6bd9c4b --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_role.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + typedauthorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeRoles implements RoleInterface +type fakeRoles struct { + *gentype.FakeClientWithListAndApply[*v1.Role, *v1.RoleList, *authorizationv1.RoleApplyConfiguration] + Fake *FakeAuthorizationV1 +} + +func newFakeRoles(fake *FakeAuthorizationV1, namespace string) typedauthorizationv1.RoleInterface { + return &fakeRoles{ + gentype.NewFakeClientWithListAndApply[*v1.Role, *v1.RoleList, *authorizationv1.RoleApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("roles"), + v1.SchemeGroupVersion.WithKind("Role"), + func() *v1.Role { return &v1.Role{} }, + func() *v1.RoleList { return &v1.RoleList{} }, + func(dst, src *v1.RoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1.RoleList) []*v1.Role { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.RoleList, items []*v1.Role) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_rolebinding.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_rolebinding.go new file mode 100644 index 0000000000000..f8226b7e44e2b --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_rolebinding.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + typedauthorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeRoleBindings implements RoleBindingInterface +type fakeRoleBindings struct { + *gentype.FakeClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *authorizationv1.RoleBindingApplyConfiguration] + Fake *FakeAuthorizationV1 +} + +func newFakeRoleBindings(fake *FakeAuthorizationV1, namespace string) typedauthorizationv1.RoleBindingInterface { + return &fakeRoleBindings{ + gentype.NewFakeClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *authorizationv1.RoleBindingApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("rolebindings"), + v1.SchemeGroupVersion.WithKind("RoleBinding"), + func() *v1.RoleBinding { return &v1.RoleBinding{} }, + func() *v1.RoleBindingList { return &v1.RoleBindingList{} }, + func(dst, src *v1.RoleBindingList) { dst.ListMeta = src.ListMeta }, + func(list *v1.RoleBindingList) []*v1.RoleBinding { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.RoleBindingList, items []*v1.RoleBinding) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_rolebindingrestriction.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_rolebindingrestriction.go new file mode 100644 index 0000000000000..0530e76193277 --- /dev/null +++ 
b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_rolebindingrestriction.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + typedauthorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeRoleBindingRestrictions implements RoleBindingRestrictionInterface +type fakeRoleBindingRestrictions struct { + *gentype.FakeClientWithListAndApply[*v1.RoleBindingRestriction, *v1.RoleBindingRestrictionList, *authorizationv1.RoleBindingRestrictionApplyConfiguration] + Fake *FakeAuthorizationV1 +} + +func newFakeRoleBindingRestrictions(fake *FakeAuthorizationV1, namespace string) typedauthorizationv1.RoleBindingRestrictionInterface { + return &fakeRoleBindingRestrictions{ + gentype.NewFakeClientWithListAndApply[*v1.RoleBindingRestriction, *v1.RoleBindingRestrictionList, *authorizationv1.RoleBindingRestrictionApplyConfiguration]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("rolebindingrestrictions"), + v1.SchemeGroupVersion.WithKind("RoleBindingRestriction"), + func() *v1.RoleBindingRestriction { return &v1.RoleBindingRestriction{} }, + func() *v1.RoleBindingRestrictionList { return &v1.RoleBindingRestrictionList{} }, + func(dst, src *v1.RoleBindingRestrictionList) { dst.ListMeta = src.ListMeta }, + func(list *v1.RoleBindingRestrictionList) []*v1.RoleBindingRestriction { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.RoleBindingRestrictionList, items []*v1.RoleBindingRestriction) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go new file mode 100644 index 0000000000000..827eb749d2204 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -0,0 +1,28 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type fakeSelfSubjectRulesReviews struct { + *gentype.FakeClient[*v1.SelfSubjectRulesReview] + Fake *FakeAuthorizationV1 +} + +func newFakeSelfSubjectRulesReviews(fake *FakeAuthorizationV1, namespace string) authorizationv1.SelfSubjectRulesReviewInterface { + return &fakeSelfSubjectRulesReviews{ + gentype.NewFakeClient[*v1.SelfSubjectRulesReview]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), + v1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview"), + func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_subjectaccessreview.go new file mode 100644 index 0000000000000..aca9c0750db17 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -0,0 +1,43 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" + testing "k8s.io/client-go/testing" +) + +// fakeSubjectAccessReviews implements SubjectAccessReviewInterface +type fakeSubjectAccessReviews struct { + *gentype.FakeClient[*v1.SubjectAccessReview] + Fake *FakeAuthorizationV1 +} + +func newFakeSubjectAccessReviews(fake *FakeAuthorizationV1) authorizationv1.SubjectAccessReviewInterface { + return &fakeSubjectAccessReviews{ + gentype.NewFakeClient[*v1.SubjectAccessReview]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("subjectaccessreviews"), + v1.SchemeGroupVersion.WithKind("SubjectAccessReview"), + func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }, + ), + fake, + } +} + +// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReviewResponse, and an error, if there is any. +func (c *fakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReviewResponse, err error) { + emptyResult := &v1.SubjectAccessReviewResponse{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateActionWithOptions(c.Resource(), subjectAccessReview, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.SubjectAccessReviewResponse), err +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_subjectrulesreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_subjectrulesreview.go new file mode 100644 index 0000000000000..42cf559812837 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake/fake_subjectrulesreview.go @@ -0,0 +1,28 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/authorization/v1" + authorizationv1 "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeSubjectRulesReviews implements SubjectRulesReviewInterface +type fakeSubjectRulesReviews struct { + *gentype.FakeClient[*v1.SubjectRulesReview] + Fake *FakeAuthorizationV1 +} + +func newFakeSubjectRulesReviews(fake *FakeAuthorizationV1, namespace string) authorizationv1.SubjectRulesReviewInterface { + return &fakeSubjectRulesReviews{ + gentype.NewFakeClient[*v1.SubjectRulesReview]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("subjectrulesreviews"), + v1.SchemeGroupVersion.WithKind("SubjectRulesReview"), + func() *v1.SubjectRulesReview { return &v1.SubjectRulesReview{} }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/generated_expansion.go new file mode 100644 index 0000000000000..ff51ceca26629 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/generated_expansion.go @@ -0,0 +1,25 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type LocalResourceAccessReviewExpansion interface{} + +type LocalSubjectAccessReviewExpansion interface{} + +type ResourceAccessReviewExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} + +type RoleBindingRestrictionExpansion interface{} + +type SelfSubjectRulesReviewExpansion interface{} + +type SubjectAccessReviewExpansion interface{} + +type SubjectRulesReviewExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/localresourceaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/localresourceaccessreview.go new file mode 100644 index 0000000000000..53d5dc570c108 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/localresourceaccessreview.go @@ -0,0 +1,56 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// LocalResourceAccessReviewsGetter has a method to return a LocalResourceAccessReviewInterface. 
+// A group's client should implement this interface. +type LocalResourceAccessReviewsGetter interface { + LocalResourceAccessReviews(namespace string) LocalResourceAccessReviewInterface +} + +// LocalResourceAccessReviewInterface has methods to work with LocalResourceAccessReview resources. +type LocalResourceAccessReviewInterface interface { + Create(ctx context.Context, localResourceAccessReview *authorizationv1.LocalResourceAccessReview, opts metav1.CreateOptions) (*authorizationv1.ResourceAccessReviewResponse, error) + + LocalResourceAccessReviewExpansion +} + +// localResourceAccessReviews implements LocalResourceAccessReviewInterface +type localResourceAccessReviews struct { + *gentype.Client[*authorizationv1.LocalResourceAccessReview] +} + +// newLocalResourceAccessReviews returns a LocalResourceAccessReviews +func newLocalResourceAccessReviews(c *AuthorizationV1Client, namespace string) *localResourceAccessReviews { + return &localResourceAccessReviews{ + gentype.NewClient[*authorizationv1.LocalResourceAccessReview]( + "localresourceaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.LocalResourceAccessReview { return &authorizationv1.LocalResourceAccessReview{} }, + ), + } +} + +// Create takes the representation of a localResourceAccessReview and creates it. Returns the server's representation of the resourceAccessReviewResponse, and an error, if there is any. +func (c *localResourceAccessReviews) Create(ctx context.Context, localResourceAccessReview *authorizationv1.LocalResourceAccessReview, opts metav1.CreateOptions) (result *authorizationv1.ResourceAccessReviewResponse, err error) { + result = &authorizationv1.ResourceAccessReviewResponse{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("localresourceaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(localResourceAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/localsubjectaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/localsubjectaccessreview.go new file mode 100644 index 0000000000000..037e7127f31ef --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/localsubjectaccessreview.go @@ -0,0 +1,56 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
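+//
+// A hedged usage sketch, not part of the generated API: "cs" stands for an
+// assumed versioned clientset built elsewhere (e.g. via versioned.NewForConfig),
+// and the namespace, verb, and resource are illustrative values only:
+//
+//	resp, err := cs.AuthorizationV1().LocalSubjectAccessReviews("demo").Create(ctx,
+//		&authorizationv1.LocalSubjectAccessReview{
+//			Action: authorizationv1.Action{Verb: "get", Resource: "pods"},
+//		}, metav1.CreateOptions{})
+//
+// Note that Create returns a *SubjectAccessReviewResponse rather than the
+// review object itself; this is a create-only (virtual) resource.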
+type LocalSubjectAccessReviewInterface interface { + Create(ctx context.Context, localSubjectAccessReview *authorizationv1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SubjectAccessReviewResponse, error) + + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + *gentype.Client[*authorizationv1.LocalSubjectAccessReview] +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + gentype.NewClient[*authorizationv1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.LocalSubjectAccessReview { return &authorizationv1.LocalSubjectAccessReview{} }, + ), + } +} + +// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the subjectAccessReviewResponse, and an error, if there is any. +func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *authorizationv1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *authorizationv1.SubjectAccessReviewResponse, err error) { + result = &authorizationv1.SubjectAccessReviewResponse{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("localsubjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(localSubjectAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/resourceaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/resourceaccessreview.go new file mode 100644 index 0000000000000..d1c47dd5147b3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/resourceaccessreview.go @@ -0,0 +1,55 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ResourceAccessReviewsGetter has a method to return a ResourceAccessReviewInterface. +// A group's client should implement this interface. +type ResourceAccessReviewsGetter interface { + ResourceAccessReviews() ResourceAccessReviewInterface +} + +// ResourceAccessReviewInterface has methods to work with ResourceAccessReview resources. 
+type ResourceAccessReviewInterface interface { + Create(ctx context.Context, resourceAccessReview *authorizationv1.ResourceAccessReview, opts metav1.CreateOptions) (*authorizationv1.ResourceAccessReviewResponse, error) + + ResourceAccessReviewExpansion +} + +// resourceAccessReviews implements ResourceAccessReviewInterface +type resourceAccessReviews struct { + *gentype.Client[*authorizationv1.ResourceAccessReview] +} + +// newResourceAccessReviews returns a ResourceAccessReviews +func newResourceAccessReviews(c *AuthorizationV1Client) *resourceAccessReviews { + return &resourceAccessReviews{ + gentype.NewClient[*authorizationv1.ResourceAccessReview]( + "resourceaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.ResourceAccessReview { return &authorizationv1.ResourceAccessReview{} }, + ), + } +} + +// Create takes the representation of a resourceAccessReview and creates it. Returns the server's representation of the resourceAccessReviewResponse, and an error, if there is any. +func (c *resourceAccessReviews) Create(ctx context.Context, resourceAccessReview *authorizationv1.ResourceAccessReview, opts metav1.CreateOptions) (result *authorizationv1.ResourceAccessReviewResponse, err error) { + result = &authorizationv1.ResourceAccessReviewResponse{} + err = c.GetClient().Post(). + Resource("resourceaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceAccessReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/role.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/role.go new file mode 100644 index 0000000000000..eeb5fd649ec47 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/role.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + applyconfigurationsauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. 
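+//
+// A hedged read-modify-write sketch ("cs" is an assumed versioned clientset
+// built elsewhere; the namespace, role name, and rule values are illustrative):
+//
+//	role, err := cs.AuthorizationV1().Roles("demo").Get(ctx, "viewer", metav1.GetOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	role.Rules = append(role.Rules, authorizationv1.PolicyRule{
+//		Verbs:     []string{"list"},
+//		Resources: []string{"pods"},
+//	})
+//	role, err = cs.AuthorizationV1().Roles("demo").Update(ctx, role, metav1.UpdateOptions{})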
+type RoleInterface interface { + Create(ctx context.Context, role *authorizationv1.Role, opts metav1.CreateOptions) (*authorizationv1.Role, error) + Update(ctx context.Context, role *authorizationv1.Role, opts metav1.UpdateOptions) (*authorizationv1.Role, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*authorizationv1.Role, error) + List(ctx context.Context, opts metav1.ListOptions) (*authorizationv1.RoleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *authorizationv1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsauthorizationv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *authorizationv1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + *gentype.ClientWithListAndApply[*authorizationv1.Role, *authorizationv1.RoleList, *applyconfigurationsauthorizationv1.RoleApplyConfiguration] +} + +// newRoles returns a Roles +func newRoles(c *AuthorizationV1Client, namespace string) *roles { + return &roles{ + gentype.NewClientWithListAndApply[*authorizationv1.Role, *authorizationv1.RoleList, *applyconfigurationsauthorizationv1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.Role { return &authorizationv1.Role{} }, + func() *authorizationv1.RoleList { return &authorizationv1.RoleList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/rolebinding.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/rolebinding.go new file mode 100644 index 0000000000000..f8f9a53a76588 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/rolebinding.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + applyconfigurationsauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. 
+type RoleBindingInterface interface { + Create(ctx context.Context, roleBinding *authorizationv1.RoleBinding, opts metav1.CreateOptions) (*authorizationv1.RoleBinding, error) + Update(ctx context.Context, roleBinding *authorizationv1.RoleBinding, opts metav1.UpdateOptions) (*authorizationv1.RoleBinding, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*authorizationv1.RoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*authorizationv1.RoleBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *authorizationv1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsauthorizationv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *authorizationv1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + *gentype.ClientWithListAndApply[*authorizationv1.RoleBinding, *authorizationv1.RoleBindingList, *applyconfigurationsauthorizationv1.RoleBindingApplyConfiguration] +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *AuthorizationV1Client, namespace string) *roleBindings { + return &roleBindings{ + gentype.NewClientWithListAndApply[*authorizationv1.RoleBinding, *authorizationv1.RoleBindingList, *applyconfigurationsauthorizationv1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.RoleBinding { return &authorizationv1.RoleBinding{} }, + func() *authorizationv1.RoleBindingList { return &authorizationv1.RoleBindingList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/rolebindingrestriction.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/rolebindingrestriction.go new file mode 100644 index 0000000000000..28269c0a84b4a --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/rolebindingrestriction.go @@ -0,0 +1,56 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + applyconfigurationsauthorizationv1 "github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RoleBindingRestrictionsGetter has a method to return a RoleBindingRestrictionInterface. +// A group's client should implement this interface. +type RoleBindingRestrictionsGetter interface { + RoleBindingRestrictions(namespace string) RoleBindingRestrictionInterface +} + +// RoleBindingRestrictionInterface has methods to work with RoleBindingRestriction resources. 
+type RoleBindingRestrictionInterface interface { + Create(ctx context.Context, roleBindingRestriction *authorizationv1.RoleBindingRestriction, opts metav1.CreateOptions) (*authorizationv1.RoleBindingRestriction, error) + Update(ctx context.Context, roleBindingRestriction *authorizationv1.RoleBindingRestriction, opts metav1.UpdateOptions) (*authorizationv1.RoleBindingRestriction, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*authorizationv1.RoleBindingRestriction, error) + List(ctx context.Context, opts metav1.ListOptions) (*authorizationv1.RoleBindingRestrictionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *authorizationv1.RoleBindingRestriction, err error) + Apply(ctx context.Context, roleBindingRestriction *applyconfigurationsauthorizationv1.RoleBindingRestrictionApplyConfiguration, opts metav1.ApplyOptions) (result *authorizationv1.RoleBindingRestriction, err error) + RoleBindingRestrictionExpansion +} + +// roleBindingRestrictions implements RoleBindingRestrictionInterface +type roleBindingRestrictions struct { + *gentype.ClientWithListAndApply[*authorizationv1.RoleBindingRestriction, *authorizationv1.RoleBindingRestrictionList, *applyconfigurationsauthorizationv1.RoleBindingRestrictionApplyConfiguration] +} + +// newRoleBindingRestrictions returns a RoleBindingRestrictions +func newRoleBindingRestrictions(c *AuthorizationV1Client, namespace string) *roleBindingRestrictions { + return &roleBindingRestrictions{ + gentype.NewClientWithListAndApply[*authorizationv1.RoleBindingRestriction, *authorizationv1.RoleBindingRestrictionList, *applyconfigurationsauthorizationv1.RoleBindingRestrictionApplyConfiguration]( + "rolebindingrestrictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.RoleBindingRestriction { return &authorizationv1.RoleBindingRestriction{} }, + func() *authorizationv1.RoleBindingRestrictionList { + return &authorizationv1.RoleBindingRestrictionList{} + }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/selfsubjectrulesreview.go new file mode 100644 index 0000000000000..5a95570693983 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/selfsubjectrulesreview.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. +// A group's client should implement this interface. +type SelfSubjectRulesReviewsGetter interface { + SelfSubjectRulesReviews(namespace string) SelfSubjectRulesReviewInterface +} + +// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
+type SelfSubjectRulesReviewInterface interface { + Create(ctx context.Context, selfSubjectRulesReview *authorizationv1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectRulesReview, error) + SelfSubjectRulesReviewExpansion +} + +// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type selfSubjectRulesReviews struct { + *gentype.Client[*authorizationv1.SelfSubjectRulesReview] +} + +// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews +func newSelfSubjectRulesReviews(c *AuthorizationV1Client, namespace string) *selfSubjectRulesReviews { + return &selfSubjectRulesReviews{ + gentype.NewClient[*authorizationv1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.SelfSubjectRulesReview { return &authorizationv1.SelfSubjectRulesReview{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/subjectaccessreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/subjectaccessreview.go new file mode 100644 index 0000000000000..241ba05c83c4a --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/subjectaccessreview.go @@ -0,0 +1,55 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SubjectAccessReviewResponse, error) + + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + *gentype.Client[*authorizationv1.SubjectAccessReview] +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + gentype.NewClient[*authorizationv1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.SubjectAccessReview { return &authorizationv1.SubjectAccessReview{} }, + ), + } +} + +// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReviewResponse, and an error, if there is any. +func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (result *authorizationv1.SubjectAccessReviewResponse, err error) { + result = &authorizationv1.SubjectAccessReviewResponse{} + err = c.GetClient().Post(). + Resource("subjectaccessreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(subjectAccessReview). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/subjectrulesreview.go b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/subjectrulesreview.go new file mode 100644 index 0000000000000..f09652ad99caf --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/subjectrulesreview.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + authorizationv1 "github.com/openshift/api/authorization/v1" + scheme "github.com/openshift/client-go/authorization/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// SubjectRulesReviewsGetter has a method to return a SubjectRulesReviewInterface. +// A group's client should implement this interface. +type SubjectRulesReviewsGetter interface { + SubjectRulesReviews(namespace string) SubjectRulesReviewInterface +} + +// SubjectRulesReviewInterface has methods to work with SubjectRulesReview resources. +type SubjectRulesReviewInterface interface { + Create(ctx context.Context, subjectRulesReview *authorizationv1.SubjectRulesReview, opts metav1.CreateOptions) (*authorizationv1.SubjectRulesReview, error) + SubjectRulesReviewExpansion +} + +// subjectRulesReviews implements SubjectRulesReviewInterface +type subjectRulesReviews struct { + *gentype.Client[*authorizationv1.SubjectRulesReview] +} + +// newSubjectRulesReviews returns a SubjectRulesReviews +func newSubjectRulesReviews(c *AuthorizationV1Client, namespace string) *subjectRulesReviews { + return &subjectRulesReviews{ + gentype.NewClient[*authorizationv1.SubjectRulesReview]( + "subjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.SubjectRulesReview { return &authorizationv1.SubjectRulesReview{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/interface.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/interface.go new file mode 100644 index 0000000000000..b5cdb4853b6b0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package authorization + +import ( + v1 "github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. 
+func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/clusterrole.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/clusterrole.go new file mode 100644 index 0000000000000..3fbe8fac56d8b --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/clusterrole.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiauthorizationv1 "github.com/openshift/api/authorization/v1" + versioned "github.com/openshift/client-go/authorization/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" + authorizationv1 "github.com/openshift/client-go/authorization/listers/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. +type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() authorizationv1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
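+//
+// A hedged factory-based sketch, illustrating the preferred shared path
+// (assumes an externalversions.SharedInformerFactory named "factory" and a
+// context "ctx", both constructed elsewhere):
+//
+//	informer := factory.Authorization().V1().ClusterRoles().Informer()
+//	lister := factory.Authorization().V1().ClusterRoles().Lister()
+//	factory.Start(ctx.Done())
+//	cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)
+//	roles, err := lister.List(labels.Everything())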
+func NewFilteredClusterRoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().ClusterRoles().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().ClusterRoles().Watch(context.TODO(), options) + }, + }, + &apiauthorizationv1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiauthorizationv1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() authorizationv1.ClusterRoleLister { + return authorizationv1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/clusterrolebinding.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/clusterrolebinding.go new file mode 100644 index 0000000000000..9d477c4fd0928 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/clusterrolebinding.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiauthorizationv1 "github.com/openshift/api/authorization/v1" + versioned "github.com/openshift/client-go/authorization/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" + authorizationv1 "github.com/openshift/client-go/authorization/listers/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() authorizationv1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleBindingInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().ClusterRoleBindings().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().ClusterRoleBindings().Watch(context.TODO(), options) + }, + }, + &apiauthorizationv1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiauthorizationv1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() authorizationv1.ClusterRoleBindingLister { + return authorizationv1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/interface.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/interface.go new file mode 100644 index 0000000000000..803cd2f3b5d6b --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/interface.go @@ -0,0 +1,57 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. + RoleBindings() RoleBindingInformer + // RoleBindingRestrictions returns a RoleBindingRestrictionInformer. + RoleBindingRestrictions() RoleBindingRestrictionInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. 
+func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindingRestrictions returns a RoleBindingRestrictionInformer. +func (v *version) RoleBindingRestrictions() RoleBindingRestrictionInformer { + return &roleBindingRestrictionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/role.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/role.go new file mode 100644 index 0000000000000..b75793a356d84 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/role.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiauthorizationv1 "github.com/openshift/api/authorization/v1" + versioned "github.com/openshift/client-go/authorization/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" + authorizationv1 "github.com/openshift/client-go/authorization/listers/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() authorizationv1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
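+//
+// A hedged sketch of the tweakListOptions hook, which narrows what the
+// informer lists and watches; the namespace, resync period, and label
+// selector below are illustrative values only:
+//
+//	inf := NewFilteredRoleInformer(client, "demo", 30*time.Second, cache.Indexers{},
+//		func(opts *metav1.ListOptions) { opts.LabelSelector = "team=platform" })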
+func NewFilteredRoleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().Roles(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().Roles(namespace).Watch(context.TODO(), options) + }, + }, + &apiauthorizationv1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiauthorizationv1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() authorizationv1.RoleLister { + return authorizationv1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/rolebinding.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/rolebinding.go new file mode 100644 index 0000000000000..91d68fb1cae5a --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/rolebinding.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiauthorizationv1 "github.com/openshift/api/authorization/v1" + versioned "github.com/openshift/client-go/authorization/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" + authorizationv1 "github.com/openshift/client-go/authorization/listers/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() authorizationv1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().RoleBindings(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().RoleBindings(namespace).Watch(context.TODO(), options) + }, + }, + &apiauthorizationv1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiauthorizationv1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() authorizationv1.RoleBindingLister { + return authorizationv1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/rolebindingrestriction.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/rolebindingrestriction.go new file mode 100644 index 0000000000000..577c59e9b1415 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1/rolebindingrestriction.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiauthorizationv1 "github.com/openshift/api/authorization/v1" + versioned "github.com/openshift/client-go/authorization/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" + authorizationv1 "github.com/openshift/client-go/authorization/listers/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RoleBindingRestrictionInformer provides access to a shared informer and lister for +// RoleBindingRestrictions. +type RoleBindingRestrictionInformer interface { + Informer() cache.SharedIndexInformer + Lister() authorizationv1.RoleBindingRestrictionLister +} + +type roleBindingRestrictionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingRestrictionInformer constructs a new informer for RoleBindingRestriction type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewRoleBindingRestrictionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingRestrictionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingRestrictionInformer constructs a new informer for RoleBindingRestriction type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingRestrictionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().RoleBindingRestrictions(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthorizationV1().RoleBindingRestrictions(namespace).Watch(context.TODO(), options) + }, + }, + &apiauthorizationv1.RoleBindingRestriction{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingRestrictionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingRestrictionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingRestrictionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiauthorizationv1.RoleBindingRestriction{}, f.defaultInformer) +} + +func (f *roleBindingRestrictionInformer) Lister() authorizationv1.RoleBindingRestrictionLister { + return authorizationv1.NewRoleBindingRestrictionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/factory.go new file mode 100644 index 0000000000000..56ad0bda3daf4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/authorization/clientset/versioned" + authorization "github.com/openshift/client-go/authorization/informers/externalversions/authorization" + internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
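+//
+// Options are applied in order by NewSharedInformerFactoryWithOptions. A
+// hedged sketch of composing them (the namespace and selector are
+// illustrative values only):
+//
+//	factory := NewSharedInformerFactoryWithOptions(client, 10*time.Minute,
+//		WithNamespace("demo"),
+//		WithTweakListOptions(func(opts *v1.ListOptions) { opts.LabelSelector = "app=web" }),
+//	)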
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+ factory := &sharedInformerFactory{
+ client: client,
+ namespace: v1.NamespaceAll,
+ defaultResync: defaultResync,
+ informers: make(map[reflect.Type]cache.SharedIndexInformer),
+ startedInformers: make(map[reflect.Type]bool),
+ customResync: make(map[reflect.Type]time.Duration),
+ }
+
+ // Apply all options
+ for _, opt := range options {
+ factory = opt(factory)
+ }
+
+ return factory
+}
+
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.shuttingDown {
+ return
+ }
+
+ for informerType, informer := range f.informers {
+ if !f.startedInformers[informerType] {
+ f.wg.Add(1)
+ // We need a new variable in each loop iteration,
+ // otherwise the goroutine would use the loop variable
+ // and that keeps changing.
+ informer := informer
+ go func() {
+ defer f.wg.Done()
+ informer.Run(stopCh)
+ }()
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+ f.lock.Lock()
+ f.shuttingDown = true
+ f.lock.Unlock()
+
+ // Will return immediately if there is nothing to wait for.
+ f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ informer.SetTransform(f.transform)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// defer cancel()
+// factory := NewSharedInformerFactory(client, resyncPeriod)
+// defer factory.Shutdown() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource)
+// typedInformer := factory.SomeAPIGroup().V1().SomeType()
+// factory.Start(ctx.Done()) // Start processing these informers.
+// synced := factory.WaitForCacheSync(ctx.Done())
+// for v, ok := range synced {
+// if !ok {
+// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+// return
+// }
+// }
+//
+// // Informers can also be created after Start, but then
+// // Start must be called again:
+// anotherGenericInformer := factory.ForResource(resource)
+// factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+
+ // Start initializes all requested informers.
They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Authorization() authorization.Interface +} + +func (f *sharedInformerFactory) Authorization() authorization.Interface { + return authorization.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/generic.go new file mode 100644 index 0000000000000..5c01f7cc0d829 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/generic.go @@ -0,0 +1,54 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + fmt "fmt" + + v1 "github.com/openshift/api/authorization/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
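For orientation, here is a minimal end-to-end sketch of the lifecycle the interface above describes, covering both a typed informer and generic access via ForResource. It assumes a versioned.Interface client constructed elsewhere (for example from a rest.Config); the function name, resync period, and printed output are illustrative, not part of the generated code.

package example

import (
	"context"
	"fmt"
	"os"
	"time"

	authorizationv1 "github.com/openshift/api/authorization/v1"
	versioned "github.com/openshift/client-go/authorization/clientset/versioned"
	externalversions "github.com/openshift/client-go/authorization/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

func runInformers(client versioned.Interface) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)
	defer factory.Shutdown() // waits for informer goroutines once ctx is cancelled

	// Request informers before Start so this Start call runs them.
	clusterRoleLister := factory.Authorization().V1().ClusterRoles().Lister()
	generic, err := factory.ForResource(authorizationv1.SchemeGroupVersion.WithResource("rolebindings"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	factory.Start(ctx.Done())
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
			return
		}
	}

	// Both listers now serve from the shared local cache.
	if roles, err := clusterRoleLister.List(labels.Everything()); err == nil {
		fmt.Printf("%d ClusterRoles cached\n", len(roles))
	}
	if objs, err := generic.Lister().List(labels.Everything()); err == nil {
		fmt.Printf("%d RoleBindings cached\n", len(objs))
	}
}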
diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..5c01f7cc0d829
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/generic.go
@@ -0,0 +1,54 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/authorization/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=authorization.openshift.io, Version=v1
+	case v1.SchemeGroupVersion.WithResource("clusterroles"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Authorization().V1().ClusterRoles().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("clusterrolebindings"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Authorization().V1().ClusterRoleBindings().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("roles"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Authorization().V1().Roles().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("rolebindings"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Authorization().V1().RoleBindings().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("rolebindingrestrictions"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Authorization().V1().RoleBindingRestrictions().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000000..d0a681edd2827
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,24 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/openshift/client-go/authorization/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle.
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/clusterrole.go b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/clusterrole.go
new file mode 100644
index 0000000000000..909d13f3b2443
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/clusterrole.go
@@ -0,0 +1,32 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	authorizationv1 "github.com/openshift/api/authorization/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// ClusterRoleLister helps list ClusterRoles.
+// All objects returned here must be treated as read-only. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*authorizationv1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + listers.ResourceIndexer[*authorizationv1.ClusterRole] +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{listers.New[*authorizationv1.ClusterRole](indexer, authorizationv1.Resource("clusterrole"))} +} diff --git a/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/clusterrolebinding.go b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/clusterrolebinding.go new file mode 100644 index 0000000000000..0526894f87b52 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/clusterrolebinding.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +// All objects returned here must be treated as read-only. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*authorizationv1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + listers.ResourceIndexer[*authorizationv1.ClusterRoleBinding] +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{listers.New[*authorizationv1.ClusterRoleBinding](indexer, authorizationv1.Resource("clusterrolebinding"))} +} diff --git a/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/expansion_generated.go new file mode 100644 index 0000000000000..0c01759a3d461 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/expansion_generated.go @@ -0,0 +1,35 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. 
+type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} + +// RoleBindingRestrictionListerExpansion allows custom methods to be added to +// RoleBindingRestrictionLister. +type RoleBindingRestrictionListerExpansion interface{} + +// RoleBindingRestrictionNamespaceListerExpansion allows custom methods to be added to +// RoleBindingRestrictionNamespaceLister. +type RoleBindingRestrictionNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/role.go b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/role.go new file mode 100644 index 0000000000000..da2d60f1eea11 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/role.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +// All objects returned here must be treated as read-only. +type RoleLister interface { + // List lists all Roles in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + listers.ResourceIndexer[*authorizationv1.Role] +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{listers.New[*authorizationv1.Role](indexer, authorizationv1.Resource("role"))} +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{listers.NewNamespaced[*authorizationv1.Role](s.ResourceIndexer, namespace)} +} + +// RoleNamespaceLister helps list and get Roles. +// All objects returned here must be treated as read-only. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*authorizationv1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. 
+type roleNamespaceLister struct { + listers.ResourceIndexer[*authorizationv1.Role] +} diff --git a/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/rolebinding.go b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/rolebinding.go new file mode 100644 index 0000000000000..85acdacece091 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/rolebinding.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +// All objects returned here must be treated as read-only. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + listers.ResourceIndexer[*authorizationv1.RoleBinding] +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{listers.New[*authorizationv1.RoleBinding](indexer, authorizationv1.Resource("rolebinding"))} +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{listers.NewNamespaced[*authorizationv1.RoleBinding](s.ResourceIndexer, namespace)} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +// All objects returned here must be treated as read-only. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*authorizationv1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + listers.ResourceIndexer[*authorizationv1.RoleBinding] +} diff --git a/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/rolebindingrestriction.go b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/rolebindingrestriction.go new file mode 100644 index 0000000000000..6b720253f4a14 --- /dev/null +++ b/vendor/github.com/openshift/client-go/authorization/listers/authorization/v1/rolebindingrestriction.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "github.com/openshift/api/authorization/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// RoleBindingRestrictionLister helps list RoleBindingRestrictions. +// All objects returned here must be treated as read-only. 
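The listers above all share one pattern: a flat List over the indexer plus, for namespaced kinds, a per-namespace view. A brief sketch of consuming the RoleBinding lister, reusing the factory from the earlier example; the namespace value is illustrative, and note that the typed informer exposes the same lister directly via Lister().

package example

import (
	authorizationv1 "github.com/openshift/api/authorization/v1"
	externalversions "github.com/openshift/client-go/authorization/informers/externalversions"
	listersv1 "github.com/openshift/client-go/authorization/listers/authorization/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func listRoleBindings(factory externalversions.SharedInformerFactory, namespace string) ([]*authorizationv1.RoleBinding, error) {
	// NewRoleBindingLister wraps the informer's indexer;
	// factory.Authorization().V1().RoleBindings().Lister() is equivalent.
	lister := listersv1.NewRoleBindingLister(
		factory.Authorization().V1().RoleBindings().Informer().GetIndexer(),
	)
	// Returned objects are shared cache state and must be treated as read-only.
	return lister.RoleBindings(namespace).List(labels.Everything())
}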
+type RoleBindingRestrictionLister interface { + // List lists all RoleBindingRestrictions in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.RoleBindingRestriction, err error) + // RoleBindingRestrictions returns an object that can list and get RoleBindingRestrictions. + RoleBindingRestrictions(namespace string) RoleBindingRestrictionNamespaceLister + RoleBindingRestrictionListerExpansion +} + +// roleBindingRestrictionLister implements the RoleBindingRestrictionLister interface. +type roleBindingRestrictionLister struct { + listers.ResourceIndexer[*authorizationv1.RoleBindingRestriction] +} + +// NewRoleBindingRestrictionLister returns a new RoleBindingRestrictionLister. +func NewRoleBindingRestrictionLister(indexer cache.Indexer) RoleBindingRestrictionLister { + return &roleBindingRestrictionLister{listers.New[*authorizationv1.RoleBindingRestriction](indexer, authorizationv1.Resource("rolebindingrestriction"))} +} + +// RoleBindingRestrictions returns an object that can list and get RoleBindingRestrictions. +func (s *roleBindingRestrictionLister) RoleBindingRestrictions(namespace string) RoleBindingRestrictionNamespaceLister { + return roleBindingRestrictionNamespaceLister{listers.NewNamespaced[*authorizationv1.RoleBindingRestriction](s.ResourceIndexer, namespace)} +} + +// RoleBindingRestrictionNamespaceLister helps list and get RoleBindingRestrictions. +// All objects returned here must be treated as read-only. +type RoleBindingRestrictionNamespaceLister interface { + // List lists all RoleBindingRestrictions in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*authorizationv1.RoleBindingRestriction, err error) + // Get retrieves the RoleBindingRestriction from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*authorizationv1.RoleBindingRestriction, error) + RoleBindingRestrictionNamespaceListerExpansion +} + +// roleBindingRestrictionNamespaceLister implements the RoleBindingRestrictionNamespaceLister +// interface. +type roleBindingRestrictionNamespaceLister struct { + listers.ResourceIndexer[*authorizationv1.RoleBindingRestriction] +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go new file mode 100644 index 0000000000000..45e7cc53f5454 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BinaryBuildSourceApplyConfiguration represents a declarative configuration of the BinaryBuildSource type for use +// with apply. +type BinaryBuildSourceApplyConfiguration struct { + AsFile *string `json:"asFile,omitempty"` +} + +// BinaryBuildSourceApplyConfiguration constructs a declarative configuration of the BinaryBuildSource type for use with +// apply. +func BinaryBuildSource() *BinaryBuildSourceApplyConfiguration { + return &BinaryBuildSourceApplyConfiguration{} +} + +// WithAsFile sets the AsFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AsFile field is set to the value of the last call. 
+func (b *BinaryBuildSourceApplyConfiguration) WithAsFile(value string) *BinaryBuildSourceApplyConfiguration { + b.AsFile = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go new file mode 100644 index 0000000000000..2a40f7f544732 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go @@ -0,0 +1,31 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BitbucketWebHookCauseApplyConfiguration represents a declarative configuration of the BitbucketWebHookCause type for use +// with apply. +type BitbucketWebHookCauseApplyConfiguration struct { + CommonWebHookCauseApplyConfiguration `json:",inline"` +} + +// BitbucketWebHookCauseApplyConfiguration constructs a declarative configuration of the BitbucketWebHookCause type for use with +// apply. +func BitbucketWebHookCause() *BitbucketWebHookCauseApplyConfiguration { + return &BitbucketWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *BitbucketWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BitbucketWebHookCauseApplyConfiguration { + b.CommonWebHookCauseApplyConfiguration.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *BitbucketWebHookCauseApplyConfiguration) WithSecret(value string) *BitbucketWebHookCauseApplyConfiguration { + b.CommonWebHookCauseApplyConfiguration.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go new file mode 100644 index 0000000000000..679d04d161527 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + internal "github.com/openshift/client-go/build/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// BuildApplyConfiguration represents a declarative configuration of the Build type for use +// with apply. +type BuildApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"` + Status *BuildStatusApplyConfiguration `json:"status,omitempty"` +} + +// Build constructs a declarative configuration of the Build type for use with +// apply. 
+func Build(name, namespace string) *BuildApplyConfiguration {
+	b := &BuildApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("Build")
+	b.WithAPIVersion("build.openshift.io/v1")
+	return b
+}
+
+// ExtractBuild extracts the applied configuration owned by fieldManager from
+// build. If no managedFields are found in build for fieldManager, a
+// BuildApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// build must be an unmodified Build API object that was retrieved from the Kubernetes API.
+// ExtractBuild provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractBuild(build *buildv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
+	return extractBuild(build, fieldManager, "")
+}
+
+// ExtractBuildStatus is the same as ExtractBuild except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractBuildStatus(build *buildv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
+	return extractBuild(build, fieldManager, "status")
+}
+
+func extractBuild(build *buildv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) {
+	b := &BuildApplyConfiguration{}
+	err := managedfields.ExtractInto(build, internal.Parser().Type("com.github.openshift.api.build.v1.Build"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(build.Name)
+	b.WithNamespace(build.Namespace)
+
+	b.WithKind("Build")
+	b.WithAPIVersion("build.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
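As a usage sketch: the builder plus the typed client's Apply give server-side apply in a few lines. This assumes the generated clientset at github.com/openshift/client-go/build/clientset/versioned with the usual generated Apply method; the resource names and field manager are illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	buildv1ac "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	buildclient "github.com/openshift/client-go/build/clientset/versioned"
)

func applyBuild(ctx context.Context, c buildclient.Interface) error {
	// Only the fields set on the apply configuration are sent; the field
	// manager takes ownership of exactly those fields.
	build := buildv1ac.Build("my-build", "my-namespace").
		WithLabels(map[string]string{"app": "demo"})
	_, err := c.BuildV1().Builds("my-namespace").Apply(ctx, build,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}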
+func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithGeneration(value int64) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *BuildApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
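Note the two setter flavors above: map-valued setters merge across calls, while scalar setters overwrite. A short sketch of the difference (all values illustrative):

package example

import (
	buildv1ac "github.com/openshift/client-go/build/applyconfigurations/build/v1"
)

func demoSetterSemantics() {
	b := buildv1ac.Build("my-build", "my-namespace").
		WithLabels(map[string]string{"app": "demo"}).
		WithLabels(map[string]string{"tier": "backend"}) // maps merge: both keys survive
	b.WithKind("Build") // scalar setters overwrite: the last call wins
	_ = b
}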
+func (b *BuildApplyConfiguration) WithFinalizers(values ...string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *BuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) *BuildApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithStatus(value *BuildStatusApplyConfiguration) *BuildApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *BuildApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go new file mode 100644 index 0000000000000..0f58e891b8136 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go @@ -0,0 +1,74 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BuildConditionApplyConfiguration represents a declarative configuration of the BuildCondition type for use +// with apply. +type BuildConditionApplyConfiguration struct { + Type *buildv1.BuildConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// BuildConditionApplyConfiguration constructs a declarative configuration of the BuildCondition type for use with +// apply. +func BuildCondition() *BuildConditionApplyConfiguration { + return &BuildConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithType(value buildv1.BuildConditionType) *BuildConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Status field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *BuildConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastUpdateTime field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *BuildConditionApplyConfiguration { + b.LastUpdateTime = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *BuildConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithReason(value string) *BuildConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithMessage(value string) *BuildConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go new file mode 100644 index 0000000000000..2408fc551c83b --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + internal "github.com/openshift/client-go/build/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// BuildConfigApplyConfiguration represents a declarative configuration of the BuildConfig type for use +// with apply. +type BuildConfigApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *BuildConfigSpecApplyConfiguration `json:"spec,omitempty"` + Status *BuildConfigStatusApplyConfiguration `json:"status,omitempty"` +} + +// BuildConfig constructs a declarative configuration of the BuildConfig type for use with +// apply. 
+func BuildConfig(name, namespace string) *BuildConfigApplyConfiguration {
+	b := &BuildConfigApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("BuildConfig")
+	b.WithAPIVersion("build.openshift.io/v1")
+	return b
+}
+
+// ExtractBuildConfig extracts the applied configuration owned by fieldManager from
+// buildConfig. If no managedFields are found in buildConfig for fieldManager, a
+// BuildConfigApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// buildConfig must be an unmodified BuildConfig API object that was retrieved from the Kubernetes API.
+// ExtractBuildConfig provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractBuildConfig(buildConfig *buildv1.BuildConfig, fieldManager string) (*BuildConfigApplyConfiguration, error) {
+	return extractBuildConfig(buildConfig, fieldManager, "")
+}
+
+// ExtractBuildConfigStatus is the same as ExtractBuildConfig except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractBuildConfigStatus(buildConfig *buildv1.BuildConfig, fieldManager string) (*BuildConfigApplyConfiguration, error) {
+	return extractBuildConfig(buildConfig, fieldManager, "status")
+}
+
+func extractBuildConfig(buildConfig *buildv1.BuildConfig, fieldManager string, subresource string) (*BuildConfigApplyConfiguration, error) {
+	b := &BuildConfigApplyConfiguration{}
+	err := managedfields.ExtractInto(buildConfig, internal.Parser().Type("com.github.openshift.api.build.v1.BuildConfig"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(buildConfig.Name)
+	b.WithNamespace(buildConfig.Namespace)
+
+	b.WithKind("BuildConfig")
+	b.WithAPIVersion("build.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *BuildConfigApplyConfiguration) WithKind(value string) *BuildConfigApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *BuildConfigApplyConfiguration) WithAPIVersion(value string) *BuildConfigApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
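A sketch of the extract/modify-in-place/apply workflow the comment above describes, using the same assumed clientset as the earlier example; for brevity it sets a fresh spec builder rather than editing the extracted one, and the field manager name is illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	buildv1ac "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	buildclient "github.com/openshift/client-go/build/clientset/versioned"
)

func setHistoryLimits(ctx context.Context, c buildclient.Interface, ns, name string) error {
	live, err := c.BuildV1().BuildConfigs(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract the fields "example-manager" owns, set the desired limits,
	// and apply the result back under the same manager.
	bc, err := buildv1ac.ExtractBuildConfig(live, "example-manager")
	if err != nil {
		return err
	}
	bc.WithSpec(buildv1ac.BuildConfigSpec().
		WithSuccessfulBuildsHistoryLimit(5).
		WithFailedBuildsHistoryLimit(5))
	_, err = c.BuildV1().BuildConfigs(ns).Apply(ctx, bc,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}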
+func (b *BuildConfigApplyConfiguration) WithName(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithGenerateName(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithNamespace(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithUID(value types.UID) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithResourceVersion(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithGeneration(value int64) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *BuildConfigApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *BuildConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *BuildConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *BuildConfigApplyConfiguration) WithLabels(entries map[string]string) *BuildConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *BuildConfigApplyConfiguration) WithAnnotations(entries map[string]string) *BuildConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *BuildConfigApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *BuildConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *BuildConfigApplyConfiguration) WithFinalizers(values ...string) *BuildConfigApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *BuildConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *BuildConfigApplyConfiguration) WithSpec(value *BuildConfigSpecApplyConfiguration) *BuildConfigApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *BuildConfigApplyConfiguration) WithStatus(value *BuildConfigStatusApplyConfiguration) *BuildConfigApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *BuildConfigApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go
new file mode 100644
index 0000000000000..07a5a0e1ac2cb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go
@@ -0,0 +1,141 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	buildv1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// BuildConfigSpecApplyConfiguration represents a declarative configuration of the BuildConfigSpec type for use
+// with apply.
+type BuildConfigSpecApplyConfiguration struct {
+	Triggers                     []BuildTriggerPolicyApplyConfiguration `json:"triggers,omitempty"`
+	RunPolicy                    *buildv1.BuildRunPolicy                `json:"runPolicy,omitempty"`
+	CommonSpecApplyConfiguration `json:",inline"`
+	SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty"`
+	FailedBuildsHistoryLimit     *int32 `json:"failedBuildsHistoryLimit,omitempty"`
+}
+
+// BuildConfigSpecApplyConfiguration constructs a declarative configuration of the BuildConfigSpec type for use with
+// apply.
+func BuildConfigSpec() *BuildConfigSpecApplyConfiguration {
+	return &BuildConfigSpecApplyConfiguration{}
+}
+
+// WithTriggers adds the given value to the Triggers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Triggers field.
+func (b *BuildConfigSpecApplyConfiguration) WithTriggers(values ...*BuildTriggerPolicyApplyConfiguration) *BuildConfigSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTriggers") + } + b.Triggers = append(b.Triggers, *values[i]) + } + return b +} + +// WithRunPolicy sets the RunPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RunPolicy field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithRunPolicy(value buildv1.BuildRunPolicy) *BuildConfigSpecApplyConfiguration { + b.RunPolicy = &value + return b +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithServiceAccount(value string) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.ServiceAccount = &value + return b +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Source = value + return b +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Revision = value + return b +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Strategy = value + return b +} + +// WithOutput sets the Output field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Output field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Output = value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. 
+func (b *BuildConfigSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Resources = &value + return b +} + +// WithPostCommit sets the PostCommit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PostCommit field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.PostCommit = value + return b +} + +// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.CompletionDeadlineSeconds = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.NodeSelector = &value + return b +} + +// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountTrustedCA field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithMountTrustedCA(value bool) *BuildConfigSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.MountTrustedCA = &value + return b +} + +// WithSuccessfulBuildsHistoryLimit sets the SuccessfulBuildsHistoryLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuccessfulBuildsHistoryLimit field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithSuccessfulBuildsHistoryLimit(value int32) *BuildConfigSpecApplyConfiguration { + b.SuccessfulBuildsHistoryLimit = &value + return b +} + +// WithFailedBuildsHistoryLimit sets the FailedBuildsHistoryLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedBuildsHistoryLimit field is set to the value of the last call. 
+func (b *BuildConfigSpecApplyConfiguration) WithFailedBuildsHistoryLimit(value int32) *BuildConfigSpecApplyConfiguration { + b.FailedBuildsHistoryLimit = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go new file mode 100644 index 0000000000000..9f63f635f9e56 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildConfigStatusApplyConfiguration represents a declarative configuration of the BuildConfigStatus type for use +// with apply. +type BuildConfigStatusApplyConfiguration struct { + LastVersion *int64 `json:"lastVersion,omitempty"` + ImageChangeTriggers []ImageChangeTriggerStatusApplyConfiguration `json:"imageChangeTriggers,omitempty"` +} + +// BuildConfigStatusApplyConfiguration constructs a declarative configuration of the BuildConfigStatus type for use with +// apply. +func BuildConfigStatus() *BuildConfigStatusApplyConfiguration { + return &BuildConfigStatusApplyConfiguration{} +} + +// WithLastVersion sets the LastVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastVersion field is set to the value of the last call. +func (b *BuildConfigStatusApplyConfiguration) WithLastVersion(value int64) *BuildConfigStatusApplyConfiguration { + b.LastVersion = &value + return b +} + +// WithImageChangeTriggers adds the given value to the ImageChangeTriggers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImageChangeTriggers field. +func (b *BuildConfigStatusApplyConfiguration) WithImageChangeTriggers(values ...*ImageChangeTriggerStatusApplyConfiguration) *BuildConfigStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithImageChangeTriggers") + } + b.ImageChangeTriggers = append(b.ImageChangeTriggers, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go new file mode 100644 index 0000000000000..0becc3d82af33 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// BuildOutputApplyConfiguration represents a declarative configuration of the BuildOutput type for use +// with apply. +type BuildOutputApplyConfiguration struct { + To *corev1.ObjectReference `json:"to,omitempty"` + PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty"` + ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"` +} + +// BuildOutputApplyConfiguration constructs a declarative configuration of the BuildOutput type for use with +// apply.
+func BuildOutput() *BuildOutputApplyConfiguration { + return &BuildOutputApplyConfiguration{} +} + +// WithTo sets the To field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the To field is set to the value of the last call. +func (b *BuildOutputApplyConfiguration) WithTo(value corev1.ObjectReference) *BuildOutputApplyConfiguration { + b.To = &value + return b +} + +// WithPushSecret sets the PushSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PushSecret field is set to the value of the last call. +func (b *BuildOutputApplyConfiguration) WithPushSecret(value corev1.LocalObjectReference) *BuildOutputApplyConfiguration { + b.PushSecret = &value + return b +} + +// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImageLabels field. +func (b *BuildOutputApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildOutputApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithImageLabels") + } + b.ImageLabels = append(b.ImageLabels, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go new file mode 100644 index 0000000000000..573e478436141 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildPostCommitSpecApplyConfiguration represents a declarative configuration of the BuildPostCommitSpec type for use +// with apply. +type BuildPostCommitSpecApplyConfiguration struct { + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + Script *string `json:"script,omitempty"` +} + +// BuildPostCommitSpecApplyConfiguration constructs a declarative configuration of the BuildPostCommitSpec type for use with +// apply. +func BuildPostCommitSpec() *BuildPostCommitSpecApplyConfiguration { + return &BuildPostCommitSpecApplyConfiguration{} +} + +// WithCommand adds the given value to the Command field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Command field. +func (b *BuildPostCommitSpecApplyConfiguration) WithCommand(values ...string) *BuildPostCommitSpecApplyConfiguration { + for i := range values { + b.Command = append(b.Command, values[i]) + } + return b +} + +// WithArgs adds the given value to the Args field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Args field.
+func (b *BuildPostCommitSpecApplyConfiguration) WithArgs(values ...string) *BuildPostCommitSpecApplyConfiguration { + for i := range values { + b.Args = append(b.Args, values[i]) + } + return b +} + +// WithScript sets the Script field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Script field is set to the value of the last call. +func (b *BuildPostCommitSpecApplyConfiguration) WithScript(value string) *BuildPostCommitSpecApplyConfiguration { + b.Script = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go new file mode 100644 index 0000000000000..d42919a7d8268 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go @@ -0,0 +1,115 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" +) + +// BuildSourceApplyConfiguration represents a declarative configuration of the BuildSource type for use +// with apply. +type BuildSourceApplyConfiguration struct { + Type *buildv1.BuildSourceType `json:"type,omitempty"` + Binary *BinaryBuildSourceApplyConfiguration `json:"binary,omitempty"` + Dockerfile *string `json:"dockerfile,omitempty"` + Git *GitBuildSourceApplyConfiguration `json:"git,omitempty"` + Images []ImageSourceApplyConfiguration `json:"images,omitempty"` + ContextDir *string `json:"contextDir,omitempty"` + SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty"` + Secrets []SecretBuildSourceApplyConfiguration `json:"secrets,omitempty"` + ConfigMaps []ConfigMapBuildSourceApplyConfiguration `json:"configMaps,omitempty"` +} + +// BuildSourceApplyConfiguration constructs a declarative configuration of the BuildSource type for use with +// apply. +func BuildSource() *BuildSourceApplyConfiguration { + return &BuildSourceApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BuildSourceApplyConfiguration) WithType(value buildv1.BuildSourceType) *BuildSourceApplyConfiguration { + b.Type = &value + return b +} + +// WithBinary sets the Binary field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Binary field is set to the value of the last call. +func (b *BuildSourceApplyConfiguration) WithBinary(value *BinaryBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration { + b.Binary = value + return b +} + +// WithDockerfile sets the Dockerfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Dockerfile field is set to the value of the last call. 
+func (b *BuildSourceApplyConfiguration) WithDockerfile(value string) *BuildSourceApplyConfiguration { + b.Dockerfile = &value + return b +} + +// WithGit sets the Git field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Git field is set to the value of the last call. +func (b *BuildSourceApplyConfiguration) WithGit(value *GitBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration { + b.Git = value + return b +} + +// WithImages adds the given value to the Images field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Images field. +func (b *BuildSourceApplyConfiguration) WithImages(values ...*ImageSourceApplyConfiguration) *BuildSourceApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithImages") + } + b.Images = append(b.Images, *values[i]) + } + return b +} + +// WithContextDir sets the ContextDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContextDir field is set to the value of the last call. +func (b *BuildSourceApplyConfiguration) WithContextDir(value string) *BuildSourceApplyConfiguration { + b.ContextDir = &value + return b +} + +// WithSourceSecret sets the SourceSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourceSecret field is set to the value of the last call. +func (b *BuildSourceApplyConfiguration) WithSourceSecret(value corev1.LocalObjectReference) *BuildSourceApplyConfiguration { + b.SourceSecret = &value + return b +} + +// WithSecrets adds the given value to the Secrets field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Secrets field. +func (b *BuildSourceApplyConfiguration) WithSecrets(values ...*SecretBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSecrets") + } + b.Secrets = append(b.Secrets, *values[i]) + } + return b +} + +// WithConfigMaps adds the given value to the ConfigMaps field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConfigMaps field.
+func (b *BuildSourceApplyConfiguration) WithConfigMaps(values ...*ConfigMapBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfigMaps") + } + b.ConfigMaps = append(b.ConfigMaps, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go new file mode 100644 index 0000000000000..3cb68d7988619 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go @@ -0,0 +1,114 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" +) + +// BuildSpecApplyConfiguration represents a declarative configuration of the BuildSpec type for use +// with apply. +type BuildSpecApplyConfiguration struct { + CommonSpecApplyConfiguration `json:",inline"` + TriggeredBy []BuildTriggerCauseApplyConfiguration `json:"triggeredBy,omitempty"` +} + +// BuildSpecApplyConfiguration constructs a declarative configuration of the BuildSpec type for use with +// apply. +func BuildSpec() *BuildSpecApplyConfiguration { + return &BuildSpecApplyConfiguration{} +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithServiceAccount(value string) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.ServiceAccount = &value + return b +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Source = value + return b +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Revision = value + return b +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Strategy = value + return b +} + +// WithOutput sets the Output field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Output field is set to the value of the last call. 
+func (b *BuildSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Output = value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.Resources = &value + return b +} + +// WithPostCommit sets the PostCommit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PostCommit field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.PostCommit = value + return b +} + +// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.CompletionDeadlineSeconds = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.NodeSelector = &value + return b +} + +// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountTrustedCA field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithMountTrustedCA(value bool) *BuildSpecApplyConfiguration { + b.CommonSpecApplyConfiguration.MountTrustedCA = &value + return b +} + +// WithTriggeredBy adds the given value to the TriggeredBy field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TriggeredBy field.
+func (b *BuildSpecApplyConfiguration) WithTriggeredBy(values ...*BuildTriggerCauseApplyConfiguration) *BuildSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTriggeredBy") + } + b.TriggeredBy = append(b.TriggeredBy, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go new file mode 100644 index 0000000000000..7124372c151fe --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go @@ -0,0 +1,149 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BuildStatusApplyConfiguration represents a declarative configuration of the BuildStatus type for use +// with apply. +type BuildStatusApplyConfiguration struct { + Phase *buildv1.BuildPhase `json:"phase,omitempty"` + Cancelled *bool `json:"cancelled,omitempty"` + Reason *buildv1.StatusReason `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + Duration *time.Duration `json:"duration,omitempty"` + OutputDockerImageReference *string `json:"outputDockerImageReference,omitempty"` + Config *corev1.ObjectReference `json:"config,omitempty"` + Output *BuildStatusOutputApplyConfiguration `json:"output,omitempty"` + Stages []StageInfoApplyConfiguration `json:"stages,omitempty"` + LogSnippet *string `json:"logSnippet,omitempty"` + Conditions []BuildConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// BuildStatusApplyConfiguration constructs a declarative configuration of the BuildStatus type for use with +// apply. +func BuildStatus() *BuildStatusApplyConfiguration { + return &BuildStatusApplyConfiguration{} +} + +// WithPhase sets the Phase field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Phase field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithPhase(value buildv1.BuildPhase) *BuildStatusApplyConfiguration { + b.Phase = &value + return b +} + +// WithCancelled sets the Cancelled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cancelled field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithCancelled(value bool) *BuildStatusApplyConfiguration { + b.Cancelled = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithReason(value buildv1.StatusReason) *BuildStatusApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Message field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithMessage(value string) *BuildStatusApplyConfiguration { + b.Message = &value + return b +} + +// WithStartTimestamp sets the StartTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StartTimestamp field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithStartTimestamp(value metav1.Time) *BuildStatusApplyConfiguration { + b.StartTimestamp = &value + return b +} + +// WithCompletionTimestamp sets the CompletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CompletionTimestamp field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithCompletionTimestamp(value metav1.Time) *BuildStatusApplyConfiguration { + b.CompletionTimestamp = &value + return b +} + +// WithDuration sets the Duration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Duration field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithDuration(value time.Duration) *BuildStatusApplyConfiguration { + b.Duration = &value + return b +} + +// WithOutputDockerImageReference sets the OutputDockerImageReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OutputDockerImageReference field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithOutputDockerImageReference(value string) *BuildStatusApplyConfiguration { + b.OutputDockerImageReference = &value + return b +} + +// WithConfig sets the Config field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Config field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithConfig(value corev1.ObjectReference) *BuildStatusApplyConfiguration { + b.Config = &value + return b +} + +// WithOutput sets the Output field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Output field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithOutput(value *BuildStatusOutputApplyConfiguration) *BuildStatusApplyConfiguration { + b.Output = value + return b +} + +// WithStages adds the given value to the Stages field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Stages field.
+func (b *BuildStatusApplyConfiguration) WithStages(values ...*StageInfoApplyConfiguration) *BuildStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithStages") + } + b.Stages = append(b.Stages, *values[i]) + } + return b +} + +// WithLogSnippet sets the LogSnippet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogSnippet field is set to the value of the last call. +func (b *BuildStatusApplyConfiguration) WithLogSnippet(value string) *BuildStatusApplyConfiguration { + b.LogSnippet = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *BuildStatusApplyConfiguration) WithConditions(values ...*BuildConditionApplyConfiguration) *BuildStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go new file mode 100644 index 0000000000000..55523d3dfb821 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildStatusOutputApplyConfiguration represents a declarative configuration of the BuildStatusOutput type for use +// with apply. +type BuildStatusOutputApplyConfiguration struct { + To *BuildStatusOutputToApplyConfiguration `json:"to,omitempty"` +} + +// BuildStatusOutputApplyConfiguration constructs a declarative configuration of the BuildStatusOutput type for use with +// apply. +func BuildStatusOutput() *BuildStatusOutputApplyConfiguration { + return &BuildStatusOutputApplyConfiguration{} +} + +// WithTo sets the To field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the To field is set to the value of the last call. +func (b *BuildStatusOutputApplyConfiguration) WithTo(value *BuildStatusOutputToApplyConfiguration) *BuildStatusOutputApplyConfiguration { + b.To = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go new file mode 100644 index 0000000000000..44149a6ba7b84 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildStatusOutputToApplyConfiguration represents a declarative configuration of the BuildStatusOutputTo type for use +// with apply.
+type BuildStatusOutputToApplyConfiguration struct { + ImageDigest *string `json:"imageDigest,omitempty"` +} + +// BuildStatusOutputToApplyConfiguration constructs a declarative configuration of the BuildStatusOutputTo type for use with +// apply. +func BuildStatusOutputTo() *BuildStatusOutputToApplyConfiguration { + return &BuildStatusOutputToApplyConfiguration{} +} + +// WithImageDigest sets the ImageDigest field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageDigest field is set to the value of the last call. +func (b *BuildStatusOutputToApplyConfiguration) WithImageDigest(value string) *BuildStatusOutputToApplyConfiguration { + b.ImageDigest = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go new file mode 100644 index 0000000000000..5ad290331fd1d --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" +) + +// BuildStrategyApplyConfiguration represents a declarative configuration of the BuildStrategy type for use +// with apply. +type BuildStrategyApplyConfiguration struct { + Type *buildv1.BuildStrategyType `json:"type,omitempty"` + DockerStrategy *DockerBuildStrategyApplyConfiguration `json:"dockerStrategy,omitempty"` + SourceStrategy *SourceBuildStrategyApplyConfiguration `json:"sourceStrategy,omitempty"` + CustomStrategy *CustomBuildStrategyApplyConfiguration `json:"customStrategy,omitempty"` + JenkinsPipelineStrategy *JenkinsPipelineBuildStrategyApplyConfiguration `json:"jenkinsPipelineStrategy,omitempty"` +} + +// BuildStrategyApplyConfiguration constructs a declarative configuration of the BuildStrategy type for use with +// apply. +func BuildStrategy() *BuildStrategyApplyConfiguration { + return &BuildStrategyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BuildStrategyApplyConfiguration) WithType(value buildv1.BuildStrategyType) *BuildStrategyApplyConfiguration { + b.Type = &value + return b +} + +// WithDockerStrategy sets the DockerStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerStrategy field is set to the value of the last call. +func (b *BuildStrategyApplyConfiguration) WithDockerStrategy(value *DockerBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration { + b.DockerStrategy = value + return b +} + +// WithSourceStrategy sets the SourceStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourceStrategy field is set to the value of the last call. 
+func (b *BuildStrategyApplyConfiguration) WithSourceStrategy(value *SourceBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration { + b.SourceStrategy = value + return b +} + +// WithCustomStrategy sets the CustomStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CustomStrategy field is set to the value of the last call. +func (b *BuildStrategyApplyConfiguration) WithCustomStrategy(value *CustomBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration { + b.CustomStrategy = value + return b +} + +// WithJenkinsPipelineStrategy sets the JenkinsPipelineStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JenkinsPipelineStrategy field is set to the value of the last call. +func (b *BuildStrategyApplyConfiguration) WithJenkinsPipelineStrategy(value *JenkinsPipelineBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration { + b.JenkinsPipelineStrategy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go new file mode 100644 index 0000000000000..47f8f2df5c314 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildTriggerCauseApplyConfiguration represents a declarative configuration of the BuildTriggerCause type for use +// with apply. +type BuildTriggerCauseApplyConfiguration struct { + Message *string `json:"message,omitempty"` + GenericWebHook *GenericWebHookCauseApplyConfiguration `json:"genericWebHook,omitempty"` + GitHubWebHook *GitHubWebHookCauseApplyConfiguration `json:"githubWebHook,omitempty"` + ImageChangeBuild *ImageChangeCauseApplyConfiguration `json:"imageChangeBuild,omitempty"` + GitLabWebHook *GitLabWebHookCauseApplyConfiguration `json:"gitlabWebHook,omitempty"` + BitbucketWebHook *BitbucketWebHookCauseApplyConfiguration `json:"bitbucketWebHook,omitempty"` +} + +// BuildTriggerCauseApplyConfiguration constructs a declarative configuration of the BuildTriggerCause type for use with +// apply. +func BuildTriggerCause() *BuildTriggerCauseApplyConfiguration { + return &BuildTriggerCauseApplyConfiguration{} +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *BuildTriggerCauseApplyConfiguration) WithMessage(value string) *BuildTriggerCauseApplyConfiguration { + b.Message = &value + return b +} + +// WithGenericWebHook sets the GenericWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenericWebHook field is set to the value of the last call. 
+func (b *BuildTriggerCauseApplyConfiguration) WithGenericWebHook(value *GenericWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration { + b.GenericWebHook = value + return b +} + +// WithGitHubWebHook sets the GitHubWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitHubWebHook field is set to the value of the last call. +func (b *BuildTriggerCauseApplyConfiguration) WithGitHubWebHook(value *GitHubWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration { + b.GitHubWebHook = value + return b +} + +// WithImageChangeBuild sets the ImageChangeBuild field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageChangeBuild field is set to the value of the last call. +func (b *BuildTriggerCauseApplyConfiguration) WithImageChangeBuild(value *ImageChangeCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration { + b.ImageChangeBuild = value + return b +} + +// WithGitLabWebHook sets the GitLabWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitLabWebHook field is set to the value of the last call. +func (b *BuildTriggerCauseApplyConfiguration) WithGitLabWebHook(value *GitLabWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration { + b.GitLabWebHook = value + return b +} + +// WithBitbucketWebHook sets the BitbucketWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BitbucketWebHook field is set to the value of the last call. +func (b *BuildTriggerCauseApplyConfiguration) WithBitbucketWebHook(value *BitbucketWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration { + b.BitbucketWebHook = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go new file mode 100644 index 0000000000000..4cbcbff4842e2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go @@ -0,0 +1,72 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" +) + +// BuildTriggerPolicyApplyConfiguration represents a declarative configuration of the BuildTriggerPolicy type for use +// with apply. +type BuildTriggerPolicyApplyConfiguration struct { + Type *buildv1.BuildTriggerType `json:"type,omitempty"` + GitHubWebHook *WebHookTriggerApplyConfiguration `json:"github,omitempty"` + GenericWebHook *WebHookTriggerApplyConfiguration `json:"generic,omitempty"` + ImageChange *ImageChangeTriggerApplyConfiguration `json:"imageChange,omitempty"` + GitLabWebHook *WebHookTriggerApplyConfiguration `json:"gitlab,omitempty"` + BitbucketWebHook *WebHookTriggerApplyConfiguration `json:"bitbucket,omitempty"` +} + +// BuildTriggerPolicyApplyConfiguration constructs a declarative configuration of the BuildTriggerPolicy type for use with +// apply. 
+func BuildTriggerPolicy() *BuildTriggerPolicyApplyConfiguration { + return &BuildTriggerPolicyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BuildTriggerPolicyApplyConfiguration) WithType(value buildv1.BuildTriggerType) *BuildTriggerPolicyApplyConfiguration { + b.Type = &value + return b +} + +// WithGitHubWebHook sets the GitHubWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitHubWebHook field is set to the value of the last call. +func (b *BuildTriggerPolicyApplyConfiguration) WithGitHubWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration { + b.GitHubWebHook = value + return b +} + +// WithGenericWebHook sets the GenericWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenericWebHook field is set to the value of the last call. +func (b *BuildTriggerPolicyApplyConfiguration) WithGenericWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration { + b.GenericWebHook = value + return b +} + +// WithImageChange sets the ImageChange field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageChange field is set to the value of the last call. +func (b *BuildTriggerPolicyApplyConfiguration) WithImageChange(value *ImageChangeTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration { + b.ImageChange = value + return b +} + +// WithGitLabWebHook sets the GitLabWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitLabWebHook field is set to the value of the last call. +func (b *BuildTriggerPolicyApplyConfiguration) WithGitLabWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration { + b.GitLabWebHook = value + return b +} + +// WithBitbucketWebHook sets the BitbucketWebHook field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BitbucketWebHook field is set to the value of the last call. +func (b *BuildTriggerPolicyApplyConfiguration) WithBitbucketWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration { + b.BitbucketWebHook = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go new file mode 100644 index 0000000000000..07cb53bf25dd9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildVolumeApplyConfiguration represents a declarative configuration of the BuildVolume type for use +// with apply. 
+type BuildVolumeApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Source *BuildVolumeSourceApplyConfiguration `json:"source,omitempty"` + Mounts []BuildVolumeMountApplyConfiguration `json:"mounts,omitempty"` +} + +// BuildVolumeApplyConfiguration constructs a declarative configuration of the BuildVolume type for use with +// apply. +func BuildVolume() *BuildVolumeApplyConfiguration { + return &BuildVolumeApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *BuildVolumeApplyConfiguration) WithName(value string) *BuildVolumeApplyConfiguration { + b.Name = &value + return b +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *BuildVolumeApplyConfiguration) WithSource(value *BuildVolumeSourceApplyConfiguration) *BuildVolumeApplyConfiguration { + b.Source = value + return b +} + +// WithMounts adds the given value to the Mounts field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Mounts field. +func (b *BuildVolumeApplyConfiguration) WithMounts(values ...*BuildVolumeMountApplyConfiguration) *BuildVolumeApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMounts") + } + b.Mounts = append(b.Mounts, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go new file mode 100644 index 0000000000000..521664df32889 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildVolumeMountApplyConfiguration represents a declarative configuration of the BuildVolumeMount type for use +// with apply. +type BuildVolumeMountApplyConfiguration struct { + DestinationPath *string `json:"destinationPath,omitempty"` +} + +// BuildVolumeMountApplyConfiguration constructs a declarative configuration of the BuildVolumeMount type for use with +// apply. +func BuildVolumeMount() *BuildVolumeMountApplyConfiguration { + return &BuildVolumeMountApplyConfiguration{} +} + +// WithDestinationPath sets the DestinationPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationPath field is set to the value of the last call.
+func (b *BuildVolumeMountApplyConfiguration) WithDestinationPath(value string) *BuildVolumeMountApplyConfiguration { + b.DestinationPath = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go new file mode 100644 index 0000000000000..96e8f7a1c2073 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go @@ -0,0 +1,55 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" +) + +// BuildVolumeSourceApplyConfiguration represents a declarative configuration of the BuildVolumeSource type for use +// with apply. +type BuildVolumeSourceApplyConfiguration struct { + Type *buildv1.BuildVolumeSourceType `json:"type,omitempty"` + Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` + ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"` + CSI *corev1.CSIVolumeSource `json:"csi,omitempty"` +} + +// BuildVolumeSourceApplyConfiguration constructs a declarative configuration of the BuildVolumeSource type for use with +// apply. +func BuildVolumeSource() *BuildVolumeSourceApplyConfiguration { + return &BuildVolumeSourceApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BuildVolumeSourceApplyConfiguration) WithType(value buildv1.BuildVolumeSourceType) *BuildVolumeSourceApplyConfiguration { + b.Type = &value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *BuildVolumeSourceApplyConfiguration) WithSecret(value corev1.SecretVolumeSource) *BuildVolumeSourceApplyConfiguration { + b.Secret = &value + return b +} + +// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigMap field is set to the value of the last call. +func (b *BuildVolumeSourceApplyConfiguration) WithConfigMap(value corev1.ConfigMapVolumeSource) *BuildVolumeSourceApplyConfiguration { + b.ConfigMap = &value + return b +} + +// WithCSI sets the CSI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CSI field is set to the value of the last call. 
+func (b *BuildVolumeSourceApplyConfiguration) WithCSI(value corev1.CSIVolumeSource) *BuildVolumeSourceApplyConfiguration { + b.CSI = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go new file mode 100644 index 0000000000000..fc02e12c473ab --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go @@ -0,0 +1,109 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" +) + +// CommonSpecApplyConfiguration represents a declarative configuration of the CommonSpec type for use +// with apply. +type CommonSpecApplyConfiguration struct { + ServiceAccount *string `json:"serviceAccount,omitempty"` + Source *BuildSourceApplyConfiguration `json:"source,omitempty"` + Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"` + Strategy *BuildStrategyApplyConfiguration `json:"strategy,omitempty"` + Output *BuildOutputApplyConfiguration `json:"output,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + PostCommit *BuildPostCommitSpecApplyConfiguration `json:"postCommit,omitempty"` + CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty"` + NodeSelector *buildv1.OptionalNodeSelector `json:"nodeSelector,omitempty"` + MountTrustedCA *bool `json:"mountTrustedCA,omitempty"` +} + +// CommonSpecApplyConfiguration constructs a declarative configuration of the CommonSpec type for use with +// apply. +func CommonSpec() *CommonSpecApplyConfiguration { + return &CommonSpecApplyConfiguration{} +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithServiceAccount(value string) *CommonSpecApplyConfiguration { + b.ServiceAccount = &value + return b +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *CommonSpecApplyConfiguration { + b.Source = value + return b +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *CommonSpecApplyConfiguration { + b.Revision = value + return b +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. 
+func (b *CommonSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *CommonSpecApplyConfiguration { + b.Strategy = value + return b +} + +// WithOutput sets the Output field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Output field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *CommonSpecApplyConfiguration { + b.Output = value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *CommonSpecApplyConfiguration { + b.Resources = &value + return b +} + +// WithPostCommit sets the PostCommit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PostCommit field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *CommonSpecApplyConfiguration { + b.PostCommit = value + return b +} + +// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *CommonSpecApplyConfiguration { + b.CompletionDeadlineSeconds = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *CommonSpecApplyConfiguration { + b.NodeSelector = &value + return b +} + +// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountTrustedCA field is set to the value of the last call. +func (b *CommonSpecApplyConfiguration) WithMountTrustedCA(value bool) *CommonSpecApplyConfiguration { + b.MountTrustedCA = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go new file mode 100644 index 0000000000000..793dd7a0160c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CommonWebHookCauseApplyConfiguration represents a declarative configuration of the CommonWebHookCause type for use +// with apply. 
+type CommonWebHookCauseApplyConfiguration struct { + Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"` + Secret *string `json:"secret,omitempty"` +} + +// CommonWebHookCauseApplyConfiguration constructs a declarative configuration of the CommonWebHookCause type for use with +// apply. +func CommonWebHookCause() *CommonWebHookCauseApplyConfiguration { + return &CommonWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *CommonWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *CommonWebHookCauseApplyConfiguration { + b.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *CommonWebHookCauseApplyConfiguration) WithSecret(value string) *CommonWebHookCauseApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go new file mode 100644 index 0000000000000..420b86656f935 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ConfigMapBuildSourceApplyConfiguration represents a declarative configuration of the ConfigMapBuildSource type for use +// with apply. +type ConfigMapBuildSourceApplyConfiguration struct { + ConfigMap *corev1.LocalObjectReference `json:"configMap,omitempty"` + DestinationDir *string `json:"destinationDir,omitempty"` +} + +// ConfigMapBuildSourceApplyConfiguration constructs a declarative configuration of the ConfigMapBuildSource type for use with +// apply. +func ConfigMapBuildSource() *ConfigMapBuildSourceApplyConfiguration { + return &ConfigMapBuildSourceApplyConfiguration{} +} + +// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigMap field is set to the value of the last call. +func (b *ConfigMapBuildSourceApplyConfiguration) WithConfigMap(value corev1.LocalObjectReference) *ConfigMapBuildSourceApplyConfiguration { + b.ConfigMap = &value + return b +} + +// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationDir field is set to the value of the last call. 
+func (b *ConfigMapBuildSourceApplyConfiguration) WithDestinationDir(value string) *ConfigMapBuildSourceApplyConfiguration {
+	b.DestinationDir = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go
new file mode 100644
index 0000000000000..b445ef5c24c4b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go
@@ -0,0 +1,88 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// CustomBuildStrategyApplyConfiguration represents a declarative configuration of the CustomBuildStrategy type for use
+// with apply.
+type CustomBuildStrategyApplyConfiguration struct {
+	From *corev1.ObjectReference `json:"from,omitempty"`
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty"`
+	Env []corev1.EnvVar `json:"env,omitempty"`
+	ExposeDockerSocket *bool `json:"exposeDockerSocket,omitempty"`
+	ForcePull *bool `json:"forcePull,omitempty"`
+	Secrets []SecretSpecApplyConfiguration `json:"secrets,omitempty"`
+	BuildAPIVersion *string `json:"buildAPIVersion,omitempty"`
+}
+
+// CustomBuildStrategyApplyConfiguration constructs a declarative configuration of the CustomBuildStrategy type for use with
+// apply.
+func CustomBuildStrategy() *CustomBuildStrategyApplyConfiguration {
+	return &CustomBuildStrategyApplyConfiguration{}
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *CustomBuildStrategyApplyConfiguration) WithFrom(value corev1.ObjectReference) *CustomBuildStrategyApplyConfiguration {
+	b.From = &value
+	return b
+}
+
+// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PullSecret field is set to the value of the last call.
+func (b *CustomBuildStrategyApplyConfiguration) WithPullSecret(value corev1.LocalObjectReference) *CustomBuildStrategyApplyConfiguration {
+	b.PullSecret = &value
+	return b
+}
+
+// WithEnv adds the given value to the Env field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Env field.
+func (b *CustomBuildStrategyApplyConfiguration) WithEnv(values ...corev1.EnvVar) *CustomBuildStrategyApplyConfiguration {
+	for i := range values {
+		b.Env = append(b.Env, values[i])
+	}
+	return b
+}
+
+// WithExposeDockerSocket sets the ExposeDockerSocket field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExposeDockerSocket field is set to the value of the last call.
+func (b *CustomBuildStrategyApplyConfiguration) WithExposeDockerSocket(value bool) *CustomBuildStrategyApplyConfiguration {
+	b.ExposeDockerSocket = &value
+	return b
+}
+
+// WithForcePull sets the ForcePull field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ForcePull field is set to the value of the last call.
+func (b *CustomBuildStrategyApplyConfiguration) WithForcePull(value bool) *CustomBuildStrategyApplyConfiguration {
+	b.ForcePull = &value
+	return b
+}
+
+// WithSecrets adds the given value to the Secrets field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Secrets field.
+func (b *CustomBuildStrategyApplyConfiguration) WithSecrets(values ...*SecretSpecApplyConfiguration) *CustomBuildStrategyApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithSecrets")
+		}
+		b.Secrets = append(b.Secrets, *values[i])
+	}
+	return b
+}
+
+// WithBuildAPIVersion sets the BuildAPIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BuildAPIVersion field is set to the value of the last call.
+func (b *CustomBuildStrategyApplyConfiguration) WithBuildAPIVersion(value string) *CustomBuildStrategyApplyConfiguration {
+	b.BuildAPIVersion = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go
new file mode 100644
index 0000000000000..c37b95463fe33
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go
@@ -0,0 +1,109 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	buildv1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// DockerBuildStrategyApplyConfiguration represents a declarative configuration of the DockerBuildStrategy type for use
+// with apply.
+type DockerBuildStrategyApplyConfiguration struct {
+	From *corev1.ObjectReference `json:"from,omitempty"`
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty"`
+	NoCache *bool `json:"noCache,omitempty"`
+	Env []corev1.EnvVar `json:"env,omitempty"`
+	ForcePull *bool `json:"forcePull,omitempty"`
+	DockerfilePath *string `json:"dockerfilePath,omitempty"`
+	BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty"`
+	ImageOptimizationPolicy *buildv1.ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty"`
+	Volumes []BuildVolumeApplyConfiguration `json:"volumes,omitempty"`
+}
+
+// DockerBuildStrategyApplyConfiguration constructs a declarative configuration of the DockerBuildStrategy type for use with
+// apply.
+func DockerBuildStrategy() *DockerBuildStrategyApplyConfiguration {
+	return &DockerBuildStrategyApplyConfiguration{}
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *DockerBuildStrategyApplyConfiguration) WithFrom(value corev1.ObjectReference) *DockerBuildStrategyApplyConfiguration {
+	b.From = &value
+	return b
+}
+
+// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PullSecret field is set to the value of the last call.
+func (b *DockerBuildStrategyApplyConfiguration) WithPullSecret(value corev1.LocalObjectReference) *DockerBuildStrategyApplyConfiguration {
+	b.PullSecret = &value
+	return b
+}
+
+// WithNoCache sets the NoCache field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NoCache field is set to the value of the last call.
+func (b *DockerBuildStrategyApplyConfiguration) WithNoCache(value bool) *DockerBuildStrategyApplyConfiguration {
+	b.NoCache = &value
+	return b
+}
+
+// WithEnv adds the given value to the Env field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Env field.
+func (b *DockerBuildStrategyApplyConfiguration) WithEnv(values ...corev1.EnvVar) *DockerBuildStrategyApplyConfiguration {
+	for i := range values {
+		b.Env = append(b.Env, values[i])
+	}
+	return b
+}
+
+// WithForcePull sets the ForcePull field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ForcePull field is set to the value of the last call.
+func (b *DockerBuildStrategyApplyConfiguration) WithForcePull(value bool) *DockerBuildStrategyApplyConfiguration {
+	b.ForcePull = &value
+	return b
+}
+
+// WithDockerfilePath sets the DockerfilePath field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DockerfilePath field is set to the value of the last call.
+func (b *DockerBuildStrategyApplyConfiguration) WithDockerfilePath(value string) *DockerBuildStrategyApplyConfiguration {
+	b.DockerfilePath = &value
+	return b
+}
+
+// WithBuildArgs adds the given value to the BuildArgs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the BuildArgs field.
+func (b *DockerBuildStrategyApplyConfiguration) WithBuildArgs(values ...corev1.EnvVar) *DockerBuildStrategyApplyConfiguration {
+	for i := range values {
+		b.BuildArgs = append(b.BuildArgs, values[i])
+	}
+	return b
+}
+
+// WithImageOptimizationPolicy sets the ImageOptimizationPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ImageOptimizationPolicy field is set to the value of the last call.
+func (b *DockerBuildStrategyApplyConfiguration) WithImageOptimizationPolicy(value buildv1.ImageOptimizationPolicy) *DockerBuildStrategyApplyConfiguration {
+	b.ImageOptimizationPolicy = &value
+	return b
+}
+
+// WithVolumes adds the given value to the Volumes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Volumes field.
+func (b *DockerBuildStrategyApplyConfiguration) WithVolumes(values ...*BuildVolumeApplyConfiguration) *DockerBuildStrategyApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithVolumes")
+		}
+		b.Volumes = append(b.Volumes, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go
new file mode 100644
index 0000000000000..b11d67359e67e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GenericWebHookCauseApplyConfiguration represents a declarative configuration of the GenericWebHookCause type for use
+// with apply.
+type GenericWebHookCauseApplyConfiguration struct {
+	Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"`
+	Secret *string `json:"secret,omitempty"`
+}
+
+// GenericWebHookCauseApplyConfiguration constructs a declarative configuration of the GenericWebHookCause type for use with
+// apply.
+func GenericWebHookCause() *GenericWebHookCauseApplyConfiguration {
+	return &GenericWebHookCauseApplyConfiguration{}
+}
+
+// WithRevision sets the Revision field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Revision field is set to the value of the last call.
+func (b *GenericWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GenericWebHookCauseApplyConfiguration {
+	b.Revision = value
+	return b
+}
+
+// WithSecret sets the Secret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Secret field is set to the value of the last call.
+func (b *GenericWebHookCauseApplyConfiguration) WithSecret(value string) *GenericWebHookCauseApplyConfiguration {
+	b.Secret = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go
new file mode 100644
index 0000000000000..f0426254bf865
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go
@@ -0,0 +1,57 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GitBuildSourceApplyConfiguration represents a declarative configuration of the GitBuildSource type for use
+// with apply.
+type GitBuildSourceApplyConfiguration struct { + URI *string `json:"uri,omitempty"` + Ref *string `json:"ref,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// GitBuildSourceApplyConfiguration constructs a declarative configuration of the GitBuildSource type for use with +// apply. +func GitBuildSource() *GitBuildSourceApplyConfiguration { + return &GitBuildSourceApplyConfiguration{} +} + +// WithURI sets the URI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URI field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithURI(value string) *GitBuildSourceApplyConfiguration { + b.URI = &value + return b +} + +// WithRef sets the Ref field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ref field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithRef(value string) *GitBuildSourceApplyConfiguration { + b.Ref = &value + return b +} + +// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPProxy field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithHTTPProxy(value string) *GitBuildSourceApplyConfiguration { + b.ProxyConfigApplyConfiguration.HTTPProxy = &value + return b +} + +// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSProxy field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithHTTPSProxy(value string) *GitBuildSourceApplyConfiguration { + b.ProxyConfigApplyConfiguration.HTTPSProxy = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithNoProxy(value string) *GitBuildSourceApplyConfiguration { + b.ProxyConfigApplyConfiguration.NoProxy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go new file mode 100644 index 0000000000000..7aef57d741a13 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitHubWebHookCauseApplyConfiguration represents a declarative configuration of the GitHubWebHookCause type for use +// with apply. +type GitHubWebHookCauseApplyConfiguration struct { + Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"` + Secret *string `json:"secret,omitempty"` +} + +// GitHubWebHookCauseApplyConfiguration constructs a declarative configuration of the GitHubWebHookCause type for use with +// apply. 
+func GitHubWebHookCause() *GitHubWebHookCauseApplyConfiguration { + return &GitHubWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *GitHubWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GitHubWebHookCauseApplyConfiguration { + b.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *GitHubWebHookCauseApplyConfiguration) WithSecret(value string) *GitHubWebHookCauseApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go new file mode 100644 index 0000000000000..db55e91b8d73a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go @@ -0,0 +1,31 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitLabWebHookCauseApplyConfiguration represents a declarative configuration of the GitLabWebHookCause type for use +// with apply. +type GitLabWebHookCauseApplyConfiguration struct { + CommonWebHookCauseApplyConfiguration `json:",inline"` +} + +// GitLabWebHookCauseApplyConfiguration constructs a declarative configuration of the GitLabWebHookCause type for use with +// apply. +func GitLabWebHookCause() *GitLabWebHookCauseApplyConfiguration { + return &GitLabWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *GitLabWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GitLabWebHookCauseApplyConfiguration { + b.CommonWebHookCauseApplyConfiguration.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *GitLabWebHookCauseApplyConfiguration) WithSecret(value string) *GitLabWebHookCauseApplyConfiguration { + b.CommonWebHookCauseApplyConfiguration.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go new file mode 100644 index 0000000000000..ffe7bcf8e4ed0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitSourceRevisionApplyConfiguration represents a declarative configuration of the GitSourceRevision type for use +// with apply. 
+type GitSourceRevisionApplyConfiguration struct { + Commit *string `json:"commit,omitempty"` + Author *SourceControlUserApplyConfiguration `json:"author,omitempty"` + Committer *SourceControlUserApplyConfiguration `json:"committer,omitempty"` + Message *string `json:"message,omitempty"` +} + +// GitSourceRevisionApplyConfiguration constructs a declarative configuration of the GitSourceRevision type for use with +// apply. +func GitSourceRevision() *GitSourceRevisionApplyConfiguration { + return &GitSourceRevisionApplyConfiguration{} +} + +// WithCommit sets the Commit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Commit field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithCommit(value string) *GitSourceRevisionApplyConfiguration { + b.Commit = &value + return b +} + +// WithAuthor sets the Author field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Author field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithAuthor(value *SourceControlUserApplyConfiguration) *GitSourceRevisionApplyConfiguration { + b.Author = value + return b +} + +// WithCommitter sets the Committer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Committer field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithCommitter(value *SourceControlUserApplyConfiguration) *GitSourceRevisionApplyConfiguration { + b.Committer = value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithMessage(value string) *GitSourceRevisionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go new file mode 100644 index 0000000000000..7c0bf8ca3fb7e --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ImageChangeCauseApplyConfiguration represents a declarative configuration of the ImageChangeCause type for use +// with apply. +type ImageChangeCauseApplyConfiguration struct { + ImageID *string `json:"imageID,omitempty"` + FromRef *corev1.ObjectReference `json:"fromRef,omitempty"` +} + +// ImageChangeCauseApplyConfiguration constructs a declarative configuration of the ImageChangeCause type for use with +// apply. +func ImageChangeCause() *ImageChangeCauseApplyConfiguration { + return &ImageChangeCauseApplyConfiguration{} +} + +// WithImageID sets the ImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ImageID field is set to the value of the last call. +func (b *ImageChangeCauseApplyConfiguration) WithImageID(value string) *ImageChangeCauseApplyConfiguration { + b.ImageID = &value + return b +} + +// WithFromRef sets the FromRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FromRef field is set to the value of the last call. +func (b *ImageChangeCauseApplyConfiguration) WithFromRef(value corev1.ObjectReference) *ImageChangeCauseApplyConfiguration { + b.FromRef = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go new file mode 100644 index 0000000000000..fe8b27ade8c10 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ImageChangeTriggerApplyConfiguration represents a declarative configuration of the ImageChangeTrigger type for use +// with apply. +type ImageChangeTriggerApplyConfiguration struct { + LastTriggeredImageID *string `json:"lastTriggeredImageID,omitempty"` + From *corev1.ObjectReference `json:"from,omitempty"` + Paused *bool `json:"paused,omitempty"` +} + +// ImageChangeTriggerApplyConfiguration constructs a declarative configuration of the ImageChangeTrigger type for use with +// apply. +func ImageChangeTrigger() *ImageChangeTriggerApplyConfiguration { + return &ImageChangeTriggerApplyConfiguration{} +} + +// WithLastTriggeredImageID sets the LastTriggeredImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTriggeredImageID field is set to the value of the last call. +func (b *ImageChangeTriggerApplyConfiguration) WithLastTriggeredImageID(value string) *ImageChangeTriggerApplyConfiguration { + b.LastTriggeredImageID = &value + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *ImageChangeTriggerApplyConfiguration) WithFrom(value corev1.ObjectReference) *ImageChangeTriggerApplyConfiguration { + b.From = &value + return b +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. 
+func (b *ImageChangeTriggerApplyConfiguration) WithPaused(value bool) *ImageChangeTriggerApplyConfiguration { + b.Paused = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go new file mode 100644 index 0000000000000..ecaf5aab1367e --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ImageChangeTriggerStatusApplyConfiguration represents a declarative configuration of the ImageChangeTriggerStatus type for use +// with apply. +type ImageChangeTriggerStatusApplyConfiguration struct { + LastTriggeredImageID *string `json:"lastTriggeredImageID,omitempty"` + From *ImageStreamTagReferenceApplyConfiguration `json:"from,omitempty"` + LastTriggerTime *metav1.Time `json:"lastTriggerTime,omitempty"` +} + +// ImageChangeTriggerStatusApplyConfiguration constructs a declarative configuration of the ImageChangeTriggerStatus type for use with +// apply. +func ImageChangeTriggerStatus() *ImageChangeTriggerStatusApplyConfiguration { + return &ImageChangeTriggerStatusApplyConfiguration{} +} + +// WithLastTriggeredImageID sets the LastTriggeredImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTriggeredImageID field is set to the value of the last call. +func (b *ImageChangeTriggerStatusApplyConfiguration) WithLastTriggeredImageID(value string) *ImageChangeTriggerStatusApplyConfiguration { + b.LastTriggeredImageID = &value + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *ImageChangeTriggerStatusApplyConfiguration) WithFrom(value *ImageStreamTagReferenceApplyConfiguration) *ImageChangeTriggerStatusApplyConfiguration { + b.From = value + return b +} + +// WithLastTriggerTime sets the LastTriggerTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTriggerTime field is set to the value of the last call. +func (b *ImageChangeTriggerStatusApplyConfiguration) WithLastTriggerTime(value metav1.Time) *ImageChangeTriggerStatusApplyConfiguration { + b.LastTriggerTime = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go new file mode 100644 index 0000000000000..1d19105474467 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageLabelApplyConfiguration represents a declarative configuration of the ImageLabel type for use +// with apply. 
+type ImageLabelApplyConfiguration struct {
+	Name *string `json:"name,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// ImageLabelApplyConfiguration constructs a declarative configuration of the ImageLabel type for use with
+// apply.
+func ImageLabel() *ImageLabelApplyConfiguration {
+	return &ImageLabelApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageLabelApplyConfiguration) WithName(value string) *ImageLabelApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *ImageLabelApplyConfiguration) WithValue(value string) *ImageLabelApplyConfiguration {
+	b.Value = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go
new file mode 100644
index 0000000000000..fb46f9affa785
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go
@@ -0,0 +1,61 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// ImageSourceApplyConfiguration represents a declarative configuration of the ImageSource type for use
+// with apply.
+type ImageSourceApplyConfiguration struct {
+	From *corev1.ObjectReference `json:"from,omitempty"`
+	As []string `json:"as,omitempty"`
+	Paths []ImageSourcePathApplyConfiguration `json:"paths,omitempty"`
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty"`
+}
+
+// ImageSourceApplyConfiguration constructs a declarative configuration of the ImageSource type for use with
+// apply.
+func ImageSource() *ImageSourceApplyConfiguration {
+	return &ImageSourceApplyConfiguration{}
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *ImageSourceApplyConfiguration) WithFrom(value corev1.ObjectReference) *ImageSourceApplyConfiguration {
+	b.From = &value
+	return b
+}
+
+// WithAs adds the given value to the As field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the As field.
+func (b *ImageSourceApplyConfiguration) WithAs(values ...string) *ImageSourceApplyConfiguration {
+	for i := range values {
+		b.As = append(b.As, values[i])
+	}
+	return b
+}
+
+// WithPaths adds the given value to the Paths field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Paths field.
+func (b *ImageSourceApplyConfiguration) WithPaths(values ...*ImageSourcePathApplyConfiguration) *ImageSourceApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPaths") + } + b.Paths = append(b.Paths, *values[i]) + } + return b +} + +// WithPullSecret sets the PullSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullSecret field is set to the value of the last call. +func (b *ImageSourceApplyConfiguration) WithPullSecret(value corev1.LocalObjectReference) *ImageSourceApplyConfiguration { + b.PullSecret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go new file mode 100644 index 0000000000000..d212ffd32f0f0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageSourcePathApplyConfiguration represents a declarative configuration of the ImageSourcePath type for use +// with apply. +type ImageSourcePathApplyConfiguration struct { + SourcePath *string `json:"sourcePath,omitempty"` + DestinationDir *string `json:"destinationDir,omitempty"` +} + +// ImageSourcePathApplyConfiguration constructs a declarative configuration of the ImageSourcePath type for use with +// apply. +func ImageSourcePath() *ImageSourcePathApplyConfiguration { + return &ImageSourcePathApplyConfiguration{} +} + +// WithSourcePath sets the SourcePath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourcePath field is set to the value of the last call. +func (b *ImageSourcePathApplyConfiguration) WithSourcePath(value string) *ImageSourcePathApplyConfiguration { + b.SourcePath = &value + return b +} + +// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationDir field is set to the value of the last call. +func (b *ImageSourcePathApplyConfiguration) WithDestinationDir(value string) *ImageSourcePathApplyConfiguration { + b.DestinationDir = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go new file mode 100644 index 0000000000000..0e6445f15d195 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageStreamTagReferenceApplyConfiguration represents a declarative configuration of the ImageStreamTagReference type for use +// with apply. +type ImageStreamTagReferenceApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ImageStreamTagReferenceApplyConfiguration constructs a declarative configuration of the ImageStreamTagReference type for use with +// apply. 
+func ImageStreamTagReference() *ImageStreamTagReferenceApplyConfiguration {
+	return &ImageStreamTagReferenceApplyConfiguration{}
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ImageStreamTagReferenceApplyConfiguration) WithNamespace(value string) *ImageStreamTagReferenceApplyConfiguration {
+	b.Namespace = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageStreamTagReferenceApplyConfiguration) WithName(value string) *ImageStreamTagReferenceApplyConfiguration {
+	b.Name = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go
new file mode 100644
index 0000000000000..940caa1b304c9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// JenkinsPipelineBuildStrategyApplyConfiguration represents a declarative configuration of the JenkinsPipelineBuildStrategy type for use
+// with apply.
+type JenkinsPipelineBuildStrategyApplyConfiguration struct {
+	JenkinsfilePath *string `json:"jenkinsfilePath,omitempty"`
+	Jenkinsfile *string `json:"jenkinsfile,omitempty"`
+	Env []corev1.EnvVar `json:"env,omitempty"`
+}
+
+// JenkinsPipelineBuildStrategyApplyConfiguration constructs a declarative configuration of the JenkinsPipelineBuildStrategy type for use with
+// apply.
+func JenkinsPipelineBuildStrategy() *JenkinsPipelineBuildStrategyApplyConfiguration {
+	return &JenkinsPipelineBuildStrategyApplyConfiguration{}
+}
+
+// WithJenkinsfilePath sets the JenkinsfilePath field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the JenkinsfilePath field is set to the value of the last call.
+func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithJenkinsfilePath(value string) *JenkinsPipelineBuildStrategyApplyConfiguration {
+	b.JenkinsfilePath = &value
+	return b
+}
+
+// WithJenkinsfile sets the Jenkinsfile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Jenkinsfile field is set to the value of the last call.
+func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithJenkinsfile(value string) *JenkinsPipelineBuildStrategyApplyConfiguration {
+	b.Jenkinsfile = &value
+	return b
+}
+
+// WithEnv adds the given value to the Env field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Env field.
+func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithEnv(values ...corev1.EnvVar) *JenkinsPipelineBuildStrategyApplyConfiguration { + for i := range values { + b.Env = append(b.Env, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go new file mode 100644 index 0000000000000..db62ad5e5b4c3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProxyConfigApplyConfiguration represents a declarative configuration of the ProxyConfig type for use +// with apply. +type ProxyConfigApplyConfiguration struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy *string `json:"noProxy,omitempty"` +} + +// ProxyConfigApplyConfiguration constructs a declarative configuration of the ProxyConfig type for use with +// apply. +func ProxyConfig() *ProxyConfigApplyConfiguration { + return &ProxyConfigApplyConfiguration{} +} + +// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithHTTPProxy(value string) *ProxyConfigApplyConfiguration { + b.HTTPProxy = &value + return b +} + +// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithHTTPSProxy(value string) *ProxyConfigApplyConfiguration { + b.HTTPSProxy = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithNoProxy(value string) *ProxyConfigApplyConfiguration { + b.NoProxy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go new file mode 100644 index 0000000000000..7c6d9c83a234e --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// SecretBuildSourceApplyConfiguration represents a declarative configuration of the SecretBuildSource type for use +// with apply. +type SecretBuildSourceApplyConfiguration struct { + Secret *corev1.LocalObjectReference `json:"secret,omitempty"` + DestinationDir *string `json:"destinationDir,omitempty"` +} + +// SecretBuildSourceApplyConfiguration constructs a declarative configuration of the SecretBuildSource type for use with +// apply. 
+func SecretBuildSource() *SecretBuildSourceApplyConfiguration { + return &SecretBuildSourceApplyConfiguration{} +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *SecretBuildSourceApplyConfiguration) WithSecret(value corev1.LocalObjectReference) *SecretBuildSourceApplyConfiguration { + b.Secret = &value + return b +} + +// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationDir field is set to the value of the last call. +func (b *SecretBuildSourceApplyConfiguration) WithDestinationDir(value string) *SecretBuildSourceApplyConfiguration { + b.DestinationDir = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go new file mode 100644 index 0000000000000..b5713fc6c702d --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SecretLocalReferenceApplyConfiguration represents a declarative configuration of the SecretLocalReference type for use +// with apply. +type SecretLocalReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// SecretLocalReferenceApplyConfiguration constructs a declarative configuration of the SecretLocalReference type for use with +// apply. +func SecretLocalReference() *SecretLocalReferenceApplyConfiguration { + return &SecretLocalReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *SecretLocalReferenceApplyConfiguration) WithName(value string) *SecretLocalReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go new file mode 100644 index 0000000000000..b46ebed22397e --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// SecretSpecApplyConfiguration represents a declarative configuration of the SecretSpec type for use +// with apply. +type SecretSpecApplyConfiguration struct { + SecretSource *corev1.LocalObjectReference `json:"secretSource,omitempty"` + MountPath *string `json:"mountPath,omitempty"` +} + +// SecretSpecApplyConfiguration constructs a declarative configuration of the SecretSpec type for use with +// apply. 
+func SecretSpec() *SecretSpecApplyConfiguration {
+	return &SecretSpecApplyConfiguration{}
+}
+
+// WithSecretSource sets the SecretSource field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SecretSource field is set to the value of the last call.
+func (b *SecretSpecApplyConfiguration) WithSecretSource(value corev1.LocalObjectReference) *SecretSpecApplyConfiguration {
+	b.SecretSource = &value
+	return b
+}
+
+// WithMountPath sets the MountPath field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MountPath field is set to the value of the last call.
+func (b *SecretSpecApplyConfiguration) WithMountPath(value string) *SecretSpecApplyConfiguration {
+	b.MountPath = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go
new file mode 100644
index 0000000000000..ade973c2ef3b2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go
@@ -0,0 +1,88 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// SourceBuildStrategyApplyConfiguration represents a declarative configuration of the SourceBuildStrategy type for use
+// with apply.
+type SourceBuildStrategyApplyConfiguration struct {
+	From *corev1.ObjectReference `json:"from,omitempty"`
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty"`
+	Env []corev1.EnvVar `json:"env,omitempty"`
+	Scripts *string `json:"scripts,omitempty"`
+	Incremental *bool `json:"incremental,omitempty"`
+	ForcePull *bool `json:"forcePull,omitempty"`
+	Volumes []BuildVolumeApplyConfiguration `json:"volumes,omitempty"`
+}
+
+// SourceBuildStrategyApplyConfiguration constructs a declarative configuration of the SourceBuildStrategy type for use with
+// apply.
+func SourceBuildStrategy() *SourceBuildStrategyApplyConfiguration {
+	return &SourceBuildStrategyApplyConfiguration{}
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *SourceBuildStrategyApplyConfiguration) WithFrom(value corev1.ObjectReference) *SourceBuildStrategyApplyConfiguration {
+	b.From = &value
+	return b
+}
+
+// WithPullSecret sets the PullSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PullSecret field is set to the value of the last call.
+func (b *SourceBuildStrategyApplyConfiguration) WithPullSecret(value corev1.LocalObjectReference) *SourceBuildStrategyApplyConfiguration {
+	b.PullSecret = &value
+	return b
+}
+
+// WithEnv adds the given value to the Env field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Env field.
+func (b *SourceBuildStrategyApplyConfiguration) WithEnv(values ...corev1.EnvVar) *SourceBuildStrategyApplyConfiguration {
+	for i := range values {
+		b.Env = append(b.Env, values[i])
+	}
+	return b
+}
+
+// WithScripts sets the Scripts field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Scripts field is set to the value of the last call.
+func (b *SourceBuildStrategyApplyConfiguration) WithScripts(value string) *SourceBuildStrategyApplyConfiguration {
+	b.Scripts = &value
+	return b
+}
+
+// WithIncremental sets the Incremental field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Incremental field is set to the value of the last call.
+func (b *SourceBuildStrategyApplyConfiguration) WithIncremental(value bool) *SourceBuildStrategyApplyConfiguration {
+	b.Incremental = &value
+	return b
+}
+
+// WithForcePull sets the ForcePull field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ForcePull field is set to the value of the last call.
+func (b *SourceBuildStrategyApplyConfiguration) WithForcePull(value bool) *SourceBuildStrategyApplyConfiguration {
+	b.ForcePull = &value
+	return b
+}
+
+// WithVolumes adds the given value to the Volumes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Volumes field.
+func (b *SourceBuildStrategyApplyConfiguration) WithVolumes(values ...*BuildVolumeApplyConfiguration) *SourceBuildStrategyApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithVolumes")
+		}
+		b.Volumes = append(b.Volumes, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go
new file mode 100644
index 0000000000000..eddaf6a1d223b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// SourceControlUserApplyConfiguration represents a declarative configuration of the SourceControlUser type for use
+// with apply.
+type SourceControlUserApplyConfiguration struct {
+	Name *string `json:"name,omitempty"`
+	Email *string `json:"email,omitempty"`
+}
+
+// SourceControlUserApplyConfiguration constructs a declarative configuration of the SourceControlUser type for use with
+// apply.
+func SourceControlUser() *SourceControlUserApplyConfiguration {
+	return &SourceControlUserApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *SourceControlUserApplyConfiguration) WithName(value string) *SourceControlUserApplyConfiguration { + b.Name = &value + return b +} + +// WithEmail sets the Email field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Email field is set to the value of the last call. +func (b *SourceControlUserApplyConfiguration) WithEmail(value string) *SourceControlUserApplyConfiguration { + b.Email = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go new file mode 100644 index 0000000000000..e99261ac85fff --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" +) + +// SourceRevisionApplyConfiguration represents a declarative configuration of the SourceRevision type for use +// with apply. +type SourceRevisionApplyConfiguration struct { + Type *buildv1.BuildSourceType `json:"type,omitempty"` + Git *GitSourceRevisionApplyConfiguration `json:"git,omitempty"` +} + +// SourceRevisionApplyConfiguration constructs a declarative configuration of the SourceRevision type for use with +// apply. +func SourceRevision() *SourceRevisionApplyConfiguration { + return &SourceRevisionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *SourceRevisionApplyConfiguration) WithType(value buildv1.BuildSourceType) *SourceRevisionApplyConfiguration { + b.Type = &value + return b +} + +// WithGit sets the Git field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Git field is set to the value of the last call. +func (b *SourceRevisionApplyConfiguration) WithGit(value *GitSourceRevisionApplyConfiguration) *SourceRevisionApplyConfiguration { + b.Git = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go new file mode 100644 index 0000000000000..1f7e390bb4018 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// StageInfoApplyConfiguration represents a declarative configuration of the StageInfo type for use +// with apply. +type StageInfoApplyConfiguration struct { + Name *buildv1.StageName `json:"name,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + DurationMilliseconds *int64 `json:"durationMilliseconds,omitempty"` + Steps []StepInfoApplyConfiguration `json:"steps,omitempty"` +} + +// StageInfoApplyConfiguration constructs a declarative configuration of the StageInfo type for use with +// apply. 
+func StageInfo() *StageInfoApplyConfiguration { + return &StageInfoApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *StageInfoApplyConfiguration) WithName(value buildv1.StageName) *StageInfoApplyConfiguration { + b.Name = &value + return b +} + +// WithStartTime sets the StartTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StartTime field is set to the value of the last call. +func (b *StageInfoApplyConfiguration) WithStartTime(value metav1.Time) *StageInfoApplyConfiguration { + b.StartTime = &value + return b +} + +// WithDurationMilliseconds sets the DurationMilliseconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DurationMilliseconds field is set to the value of the last call. +func (b *StageInfoApplyConfiguration) WithDurationMilliseconds(value int64) *StageInfoApplyConfiguration { + b.DurationMilliseconds = &value + return b +} + +// WithSteps adds the given value to the Steps field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Steps field. +func (b *StageInfoApplyConfiguration) WithSteps(values ...*StepInfoApplyConfiguration) *StageInfoApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSteps") + } + b.Steps = append(b.Steps, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go new file mode 100644 index 0000000000000..4049aa5e53880 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// StepInfoApplyConfiguration represents a declarative configuration of the StepInfo type for use +// with apply. +type StepInfoApplyConfiguration struct { + Name *buildv1.StepName `json:"name,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + DurationMilliseconds *int64 `json:"durationMilliseconds,omitempty"` +} + +// StepInfoApplyConfiguration constructs a declarative configuration of the StepInfo type for use with +// apply. +func StepInfo() *StepInfoApplyConfiguration { + return &StepInfoApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call.
+func (b *StepInfoApplyConfiguration) WithName(value buildv1.StepName) *StepInfoApplyConfiguration { + b.Name = &value + return b +} + +// WithStartTime sets the StartTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StartTime field is set to the value of the last call. +func (b *StepInfoApplyConfiguration) WithStartTime(value metav1.Time) *StepInfoApplyConfiguration { + b.StartTime = &value + return b +} + +// WithDurationMilliseconds sets the DurationMilliseconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DurationMilliseconds field is set to the value of the last call. +func (b *StepInfoApplyConfiguration) WithDurationMilliseconds(value int64) *StepInfoApplyConfiguration { + b.DurationMilliseconds = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go new file mode 100644 index 0000000000000..f094a74e6ab81 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WebHookTriggerApplyConfiguration represents a declarative configuration of the WebHookTrigger type for use +// with apply. +type WebHookTriggerApplyConfiguration struct { + Secret *string `json:"secret,omitempty"` + AllowEnv *bool `json:"allowEnv,omitempty"` + SecretReference *SecretLocalReferenceApplyConfiguration `json:"secretReference,omitempty"` +} + +// WebHookTriggerApplyConfiguration constructs a declarative configuration of the WebHookTrigger type for use with +// apply. +func WebHookTrigger() *WebHookTriggerApplyConfiguration { + return &WebHookTriggerApplyConfiguration{} +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *WebHookTriggerApplyConfiguration) WithSecret(value string) *WebHookTriggerApplyConfiguration { + b.Secret = &value + return b +} + +// WithAllowEnv sets the AllowEnv field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowEnv field is set to the value of the last call. +func (b *WebHookTriggerApplyConfiguration) WithAllowEnv(value bool) *WebHookTriggerApplyConfiguration { + b.AllowEnv = &value + return b +} + +// WithSecretReference sets the SecretReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecretReference field is set to the value of the last call. 
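Note the asymmetry the generated comments describe: adders for plain values such as WithEnv append their arguments directly, while adders for nested apply configurations (WithVolumes, WithSteps) take pointers and panic on nil. A minimal sketch of nesting one builder inside another; the stage and step names are placeholders:

package main

import (
	"fmt"

	buildapiv1 "github.com/openshift/api/build/v1"
	applybuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// WithSteps dereferences each nested builder (and panics on nil), so
	// the children are constructed inline with their own With* chains.
	stage := applybuildv1.StageInfo().
		WithName(buildapiv1.StageName("PullImages")). // placeholder stage name
		WithStartTime(metav1.Now()).
		WithSteps(
			applybuildv1.StepInfo().
				WithName(buildapiv1.StepName("PullBaseImage")). // placeholder step name
				WithDurationMilliseconds(1250),
		)

	fmt.Printf("stage %q has %d step(s)\n", *stage.Name, len(stage.Steps))
}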
+func (b *WebHookTriggerApplyConfiguration) WithSecretReference(value *SecretLocalReferenceApplyConfiguration) *WebHookTriggerApplyConfiguration { + b.SecretReference = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..4b1e370810258 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go @@ -0,0 +1,1195 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.build.v1.BinaryBuildSource + map: + fields: + - name: asFile + type: + scalar: string +- name: com.github.openshift.api.build.v1.BitbucketWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.Build + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.build.v1.BuildSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.build.v1.BuildStatus + default: {} +- name: com.github.openshift.api.build.v1.BuildCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.BuildConfig + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.build.v1.BuildConfigSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.build.v1.BuildConfigStatus + default: {} +- name: com.github.openshift.api.build.v1.BuildConfigSpec + map: + fields: + - name: completionDeadlineSeconds + type: + scalar: numeric + - name: failedBuildsHistoryLimit + type: + scalar: numeric + - name: mountTrustedCA + type: + scalar: boolean + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: output + type: + namedType: com.github.openshift.api.build.v1.BuildOutput + default: {} + - name: postCommit + type: + namedType: com.github.openshift.api.build.v1.BuildPostCommitSpec + default: {} + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: runPolicy + type: + scalar: string + - name: serviceAccount + type: + scalar: string + - 
name: source + type: + namedType: com.github.openshift.api.build.v1.BuildSource + default: {} + - name: strategy + type: + namedType: com.github.openshift.api.build.v1.BuildStrategy + default: {} + - name: successfulBuildsHistoryLimit + type: + scalar: numeric + - name: triggers + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildTriggerPolicy + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.BuildConfigStatus + map: + fields: + - name: imageChangeTriggers + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageChangeTriggerStatus + elementRelationship: atomic + - name: lastVersion + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.build.v1.BuildOutput + map: + fields: + - name: imageLabels + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageLabel + elementRelationship: atomic + - name: pushSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: to + type: + namedType: io.k8s.api.core.v1.ObjectReference +- name: com.github.openshift.api.build.v1.BuildPostCommitSpec + map: + fields: + - name: args + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: script + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildSource + map: + fields: + - name: binary + type: + namedType: com.github.openshift.api.build.v1.BinaryBuildSource + - name: configMaps + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ConfigMapBuildSource + elementRelationship: atomic + - name: contextDir + type: + scalar: string + - name: dockerfile + type: + scalar: string + - name: git + type: + namedType: com.github.openshift.api.build.v1.GitBuildSource + - name: images + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageSource + elementRelationship: atomic + - name: secrets + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.SecretBuildSource + elementRelationship: atomic + - name: sourceSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: type + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildSpec + map: + fields: + - name: completionDeadlineSeconds + type: + scalar: numeric + - name: mountTrustedCA + type: + scalar: boolean + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: output + type: + namedType: com.github.openshift.api.build.v1.BuildOutput + default: {} + - name: postCommit + type: + namedType: com.github.openshift.api.build.v1.BuildPostCommitSpec + default: {} + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: serviceAccount + type: + scalar: string + - name: source + type: + namedType: com.github.openshift.api.build.v1.BuildSource + default: {} + - name: strategy + type: + namedType: com.github.openshift.api.build.v1.BuildStrategy + default: {} + - name: triggeredBy + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildTriggerCause + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.BuildStatus + map: + fields: + - name: cancelled + type: + scalar: boolean + - name: completionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: conditions + 
type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildCondition + elementRelationship: associative + keys: + - type + - name: config + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: duration + type: + scalar: numeric + - name: logSnippet + type: + scalar: string + - name: message + type: + scalar: string + - name: output + type: + namedType: com.github.openshift.api.build.v1.BuildStatusOutput + default: {} + - name: outputDockerImageReference + type: + scalar: string + - name: phase + type: + scalar: string + default: "" + - name: reason + type: + scalar: string + - name: stages + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.StageInfo + elementRelationship: atomic + - name: startTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: com.github.openshift.api.build.v1.BuildStatusOutput + map: + fields: + - name: to + type: + namedType: com.github.openshift.api.build.v1.BuildStatusOutputTo +- name: com.github.openshift.api.build.v1.BuildStatusOutputTo + map: + fields: + - name: imageDigest + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildStrategy + map: + fields: + - name: customStrategy + type: + namedType: com.github.openshift.api.build.v1.CustomBuildStrategy + - name: dockerStrategy + type: + namedType: com.github.openshift.api.build.v1.DockerBuildStrategy + - name: jenkinsPipelineStrategy + type: + namedType: com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy + - name: sourceStrategy + type: + namedType: com.github.openshift.api.build.v1.SourceBuildStrategy + - name: type + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildTriggerCause + map: + fields: + - name: bitbucketWebHook + type: + namedType: com.github.openshift.api.build.v1.BitbucketWebHookCause + - name: genericWebHook + type: + namedType: com.github.openshift.api.build.v1.GenericWebHookCause + - name: githubWebHook + type: + namedType: com.github.openshift.api.build.v1.GitHubWebHookCause + - name: gitlabWebHook + type: + namedType: com.github.openshift.api.build.v1.GitLabWebHookCause + - name: imageChangeBuild + type: + namedType: com.github.openshift.api.build.v1.ImageChangeCause + - name: message + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildTriggerPolicy + map: + fields: + - name: bitbucket + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: generic + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: github + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: gitlab + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: imageChange + type: + namedType: com.github.openshift.api.build.v1.ImageChangeTrigger + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.BuildVolume + map: + fields: + - name: mounts + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildVolumeMount + elementRelationship: associative + keys: + - destinationPath + - name: name + type: + scalar: string + default: "" + - name: source + type: + namedType: com.github.openshift.api.build.v1.BuildVolumeSource + default: {} +- name: com.github.openshift.api.build.v1.BuildVolumeMount + map: + fields: + - name: destinationPath + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.BuildVolumeSource + map: + fields: + - name: configMap + type: + namedType: 
io.k8s.api.core.v1.ConfigMapVolumeSource + - name: csi + type: + namedType: io.k8s.api.core.v1.CSIVolumeSource + - name: secret + type: + namedType: io.k8s.api.core.v1.SecretVolumeSource + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.ConfigMapBuildSource + map: + fields: + - name: configMap + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + default: {} + - name: destinationDir + type: + scalar: string +- name: com.github.openshift.api.build.v1.CustomBuildStrategy + map: + fields: + - name: buildAPIVersion + type: + scalar: string + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: exposeDockerSocket + type: + scalar: boolean + - name: forcePull + type: + scalar: boolean + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: secrets + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.SecretSpec + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.DockerBuildStrategy + map: + fields: + - name: buildArgs + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: dockerfilePath + type: + scalar: string + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: forcePull + type: + scalar: boolean + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: imageOptimizationPolicy + type: + scalar: string + - name: noCache + type: + scalar: boolean + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: volumes + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildVolume + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.build.v1.GenericWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.GitBuildSource + map: + fields: + - name: httpProxy + type: + scalar: string + - name: httpsProxy + type: + scalar: string + - name: noProxy + type: + scalar: string + - name: ref + type: + scalar: string + - name: uri + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.GitHubWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.GitLabWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.GitSourceRevision + map: + fields: + - name: author + type: + namedType: com.github.openshift.api.build.v1.SourceControlUser + default: {} + - name: commit + type: + scalar: string + - name: committer + type: + namedType: com.github.openshift.api.build.v1.SourceControlUser + default: {} + - name: message + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageChangeCause + map: + fields: + - name: fromRef + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: imageID + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageChangeTrigger + map: + fields: + - name: from + type: + 
namedType: io.k8s.api.core.v1.ObjectReference + - name: lastTriggeredImageID + type: + scalar: string + - name: paused + type: + scalar: boolean +- name: com.github.openshift.api.build.v1.ImageChangeTriggerStatus + map: + fields: + - name: from + type: + namedType: com.github.openshift.api.build.v1.ImageStreamTagReference + default: {} + - name: lastTriggerTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastTriggeredImageID + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageLabel + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageSource + map: + fields: + - name: as + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: paths + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageSourcePath + elementRelationship: atomic + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference +- name: com.github.openshift.api.build.v1.ImageSourcePath + map: + fields: + - name: destinationDir + type: + scalar: string + default: "" + - name: sourcePath + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.ImageStreamTagReference + map: + fields: + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string +- name: com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy + map: + fields: + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: jenkinsfile + type: + scalar: string + - name: jenkinsfilePath + type: + scalar: string +- name: com.github.openshift.api.build.v1.SecretBuildSource + map: + fields: + - name: destinationDir + type: + scalar: string + - name: secret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + default: {} +- name: com.github.openshift.api.build.v1.SecretLocalReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.SecretSpec + map: + fields: + - name: mountPath + type: + scalar: string + default: "" + - name: secretSource + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + default: {} +- name: com.github.openshift.api.build.v1.SourceBuildStrategy + map: + fields: + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: forcePull + type: + scalar: boolean + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: incremental + type: + scalar: boolean + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: scripts + type: + scalar: string + - name: volumes + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildVolume + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.build.v1.SourceControlUser + map: + fields: + - name: email + type: + scalar: string + - name: name + type: + scalar: string +- name: com.github.openshift.api.build.v1.SourceRevision + map: + fields: + - name: git + type: + namedType: com.github.openshift.api.build.v1.GitSourceRevision + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.StageInfo + map: + fields: + - name: durationMilliseconds + type: + scalar: numeric + - name: name + type: 
+ scalar: string + - name: startTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: steps + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.StepInfo + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.StepInfo + map: + fields: + - name: durationMilliseconds + type: + scalar: numeric + - name: name + type: + scalar: string + - name: startTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: com.github.openshift.api.build.v1.WebHookTrigger + map: + fields: + - name: allowEnv + type: + scalar: boolean + - name: secret + type: + scalar: string + - name: secretReference + type: + namedType: com.github.openshift.api.build.v1.SecretLocalReference +- name: io.k8s.api.core.v1.CSIVolumeSource + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: readOnly + type: + scalar: boolean + - name: volumeAttributes + type: + map: + elementType: + scalar: string +- name: io.k8s.api.core.v1.ConfigMapKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.ConfigMapVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.EnvVar + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string + - name: valueFrom + type: + namedType: io.k8s.api.core.v1.EnvVarSource +- name: io.k8s.api.core.v1.EnvVarSource + map: + fields: + - name: configMapKeyRef + type: + namedType: io.k8s.api.core.v1.ConfigMapKeySelector + - name: fieldRef + type: + namedType: io.k8s.api.core.v1.ObjectFieldSelector + - name: resourceFieldRef + type: + namedType: io.k8s.api.core.v1.ResourceFieldSelector + - name: secretKeyRef + type: + namedType: io.k8s.api.core.v1.SecretKeySelector +- name: io.k8s.api.core.v1.KeyToPath + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: mode + type: + scalar: numeric + - name: path + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectFieldSelector + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: request + type: + scalar: string +- name: io.k8s.api.core.v1.ResourceFieldSelector + map: + fields: + - name: containerName + type: + scalar: string + - name: divisor + 
type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: resource + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceRequirements + map: + fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.SecretKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.SecretVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: optional + type: + scalar: boolean + - name: secretName + type: + scalar: string +- name: io.k8s.apimachinery.pkg.api.resource.Quantity + scalar: untyped +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: 
"" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..ef55d34604b3a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + BuildV1() buildv1.BuildV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + buildV1 *buildv1.BuildV1Client +} + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return c.buildV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.buildV1, err = buildv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.buildV1 = buildv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..faa53af8f2d73 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + buildv1 "github.com/openshift/api/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + buildv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
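A short usage sketch for the constructors above, reaching ahead to the typed BuildV1 client defined later in this diff; the kubeconfig path and namespace are placeholders:

package main

import (
	"context"
	"fmt"

	buildclient "github.com/openshift/client-go/build/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is a placeholder).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}

	// NewForConfig shares one HTTP transport across all typed clients and
	// derives a rate limiter from QPS/Burst when none is configured.
	cs, err := buildclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	builds, err := cs.BuildV1().Builds("my-project").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, b := range builds.Items {
		fmt.Println(b.Name, b.Status.Phase)
	}
}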
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go new file mode 100644 index 0000000000000..4be0e1f0aaebd --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + scheme "github.com/openshift/client-go/build/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// BuildsGetter has a method to return a BuildInterface. +// A group's client should implement this interface. +type BuildsGetter interface { + Builds(namespace string) BuildInterface +} + +// BuildInterface has methods to work with Build resources. +type BuildInterface interface { + Create(ctx context.Context, build *buildv1.Build, opts metav1.CreateOptions) (*buildv1.Build, error) + Update(ctx context.Context, build *buildv1.Build, opts metav1.UpdateOptions) (*buildv1.Build, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, build *buildv1.Build, opts metav1.UpdateOptions) (*buildv1.Build, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*buildv1.Build, error) + List(ctx context.Context, opts metav1.ListOptions) (*buildv1.BuildList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *buildv1.Build, err error) + Apply(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *buildv1.Build, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *buildv1.Build, err error) + UpdateDetails(ctx context.Context, buildName string, build *buildv1.Build, opts metav1.UpdateOptions) (*buildv1.Build, error) + Clone(ctx context.Context, buildName string, buildRequest *buildv1.BuildRequest, opts metav1.CreateOptions) (*buildv1.Build, error) + + BuildExpansion +} + +// builds implements BuildInterface +type builds struct { + *gentype.ClientWithListAndApply[*buildv1.Build, *buildv1.BuildList, *applyconfigurationsbuildv1.BuildApplyConfiguration] +} + +// newBuilds returns a Builds +func newBuilds(c *BuildV1Client, namespace string) *builds { + return &builds{ + gentype.NewClientWithListAndApply[*buildv1.Build, *buildv1.BuildList, *applyconfigurationsbuildv1.BuildApplyConfiguration]( + "builds", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *buildv1.Build { return &buildv1.Build{} }, + func() *buildv1.BuildList { return &buildv1.BuildList{} }, + ), + } +} + +// UpdateDetails takes the top resource name and the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) UpdateDetails(ctx context.Context, buildName string, build *buildv1.Build, opts metav1.UpdateOptions) (result *buildv1.Build, err error) { + result = &buildv1.Build{} + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). + Resource("builds"). + Name(buildName). + SubResource("details"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(build). + Do(ctx). + Into(result) + return +} + +// Clone takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) Clone(ctx context.Context, buildName string, buildRequest *buildv1.BuildRequest, opts metav1.CreateOptions) (result *buildv1.Build, err error) { + result = &buildv1.Build{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("builds"). + Name(buildName). + SubResource("clone"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildRequest). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go new file mode 100644 index 0000000000000..c8e5a1f36548c --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go @@ -0,0 +1,96 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + buildv1 "github.com/openshift/api/build/v1" + scheme "github.com/openshift/client-go/build/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type BuildV1Interface interface { + RESTClient() rest.Interface + BuildsGetter + BuildConfigsGetter +} + +// BuildV1Client is used to interact with features provided by the build.openshift.io group. +type BuildV1Client struct { + restClient rest.Interface +} + +func (c *BuildV1Client) Builds(namespace string) BuildInterface { + return newBuilds(c, namespace) +} + +func (c *BuildV1Client) BuildConfigs(namespace string) BuildConfigInterface { + return newBuildConfigs(c, namespace) +} + +// NewForConfig creates a new BuildV1Client for the given config. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*BuildV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new BuildV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BuildV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &BuildV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BuildV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BuildV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BuildV1Client for the given RESTClient. +func New(c rest.Interface) *BuildV1Client { + return &BuildV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := buildv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BuildV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go new file mode 100644 index 0000000000000..16bd942026ef5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go @@ -0,0 +1,75 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + scheme "github.com/openshift/client-go/build/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// BuildConfigsGetter has a method to return a BuildConfigInterface. +// A group's client should implement this interface. +type BuildConfigsGetter interface { + BuildConfigs(namespace string) BuildConfigInterface +} + +// BuildConfigInterface has methods to work with BuildConfig resources. +type BuildConfigInterface interface { + Create(ctx context.Context, buildConfig *buildv1.BuildConfig, opts metav1.CreateOptions) (*buildv1.BuildConfig, error) + Update(ctx context.Context, buildConfig *buildv1.BuildConfig, opts metav1.UpdateOptions) (*buildv1.BuildConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, buildConfig *buildv1.BuildConfig, opts metav1.UpdateOptions) (*buildv1.BuildConfig, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*buildv1.BuildConfig, error) + List(ctx context.Context, opts metav1.ListOptions) (*buildv1.BuildConfigList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *buildv1.BuildConfig, err error) + Apply(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *buildv1.BuildConfig, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *buildv1.BuildConfig, err error) + Instantiate(ctx context.Context, buildConfigName string, buildRequest *buildv1.BuildRequest, opts metav1.CreateOptions) (*buildv1.Build, error) + + BuildConfigExpansion +} + +// buildConfigs implements BuildConfigInterface +type buildConfigs struct { + *gentype.ClientWithListAndApply[*buildv1.BuildConfig, *buildv1.BuildConfigList, *applyconfigurationsbuildv1.BuildConfigApplyConfiguration] +} + +// newBuildConfigs returns a BuildConfigs +func newBuildConfigs(c *BuildV1Client, namespace string) *buildConfigs { + return &buildConfigs{ + gentype.NewClientWithListAndApply[*buildv1.BuildConfig, *buildv1.BuildConfigList, *applyconfigurationsbuildv1.BuildConfigApplyConfiguration]( + "buildconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *buildv1.BuildConfig { return &buildv1.BuildConfig{} }, + func() *buildv1.BuildConfigList { return &buildv1.BuildConfigList{} }, + ), + } +} + +// Instantiate takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *buildConfigs) Instantiate(ctx context.Context, buildConfigName string, buildRequest *buildv1.BuildRequest, opts metav1.CreateOptions) (result *buildv1.Build, err error) { + result = &buildv1.Build{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("buildconfigs"). + Name(buildConfigName). + SubResource("instantiate"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildRequest). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
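The Instantiate method above wraps a POST to the buildconfigs/{name}/instantiate subresource. A hedged sketch of a caller; the namespace and BuildConfig name are placeholders, and the BuildRequest conventionally carries the same name as the BuildConfig it instantiates:

package main

import (
	"context"
	"fmt"

	buildapiv1 "github.com/openshift/api/build/v1"
	buildclient "github.com/openshift/client-go/build/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := buildclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The request body is posted to buildconfigs/my-app/instantiate and the
	// server answers with the newly created Build.
	req := &buildapiv1.BuildRequest{ObjectMeta: metav1.ObjectMeta{Name: "my-app"}}
	build, err := cs.BuildV1().BuildConfigs("my-project").Instantiate(context.TODO(), "my-app", req, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created build:", build.Name)
}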
+package v1 diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go new file mode 100644 index 0000000000000..d2a1f885c056f --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go @@ -0,0 +1,7 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type BuildExpansion interface{} + +type BuildConfigExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go new file mode 100644 index 0000000000000..01a651928a8b1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package build + +import ( + v1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go new file mode 100644 index 0000000000000..c5188ee3ddb08 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apibuildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + buildv1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildInformer provides access to a shared informer and lister for +// Builds. +type BuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() buildv1.BuildLister +} + +type buildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).Watch(context.TODO(), options) + }, + }, + &apibuildv1.Build{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apibuildv1.Build{}, f.defaultInformer) +} + +func (f *buildInformer) Lister() buildv1.BuildLister { + return buildv1.NewBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go new file mode 100644 index 0000000000000..b25b203eb9dc3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apibuildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + buildv1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildConfigInformer provides access to a shared informer and lister for +// BuildConfigs. +type BuildConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() buildv1.BuildConfigLister +} + +type buildConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &apibuildv1.BuildConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apibuildv1.BuildConfig{}, f.defaultInformer) +} + +func (f *buildConfigInformer) Lister() buildv1.BuildConfigLister { + return buildv1.NewBuildConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go new file mode 100644 index 0000000000000..da69fc9bb6247 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Builds returns a BuildInformer. + Builds() BuildInformer + // BuildConfigs returns a BuildConfigInformer. + BuildConfigs() BuildConfigInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Builds returns a BuildInformer. +func (v *version) Builds() BuildInformer { + return &buildInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BuildConfigs returns a BuildConfigInformer. 
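// Illustrative sketch, not generated code: using the tweakListOptions hook
// accepted by NewFilteredBuildConfigInformer above to narrow the informer's
// list/watch by label selector. The selector, namespace, and resync period are
// hypothetical; as the comments above note, prefer the shared factory in real code.
//
//	tweak := func(options *metav1.ListOptions) {
//		options.LabelSelector = "app=my-app"
//	}
//	informer := NewFilteredBuildConfigInformer(client, "my-namespace", 30*time.Minute,
//		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, tweak)
//	go informer.Run(stopCh) // stopCh is a caller-owned <-chan struct{}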
+func (v *version) BuildConfigs() BuildConfigInformer { + return &buildConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go new file mode 100644 index 0000000000000..0d3e90f3376db --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + build "github.com/openshift/client-go/build/informers/externalversions/build" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. 
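// Illustrative sketch, not generated code: composing the functional options
// defined above. The namespace and label selector are hypothetical.
//
//	factory := NewSharedInformerFactoryWithOptions(client, 10*time.Minute,
//		WithNamespace("my-namespace"),
//		WithTweakListOptions(func(options *v1.ListOptions) {
//			options.LabelSelector = "app=my-app"
//		}),
//	)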
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+// +// It is typically used like this: +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.Shutdown() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Build() build.Interface +} + +func (f *sharedInformerFactory) Build() build.Interface { + return build.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go new file mode 100644 index 0000000000000..47b3adf9993ce --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + fmt "fmt" + + v1 "github.com/openshift/api/build/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=build.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("builds"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("buildconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().BuildConfigs().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..1bcbd5975acf7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go new file mode 100644 index 0000000000000..2b85d474045ce --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// BuildLister helps list Builds. +// All objects returned here must be treated as read-only. +type BuildLister interface { + // List lists all Builds in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*buildv1.Build, err error) + // Builds returns an object that can list and get Builds. + Builds(namespace string) BuildNamespaceLister + BuildListerExpansion +} + +// buildLister implements the BuildLister interface. +type buildLister struct { + listers.ResourceIndexer[*buildv1.Build] +} + +// NewBuildLister returns a new BuildLister.
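// Illustrative sketch, not generated code: resolving a shared informer
// dynamically through the ForResource switch wired up above. The factory
// variable is assumed to be an externalversions.SharedInformerFactory, and
// buildv1 is github.com/openshift/api/build/v1.
//
//	gvr := buildv1.SchemeGroupVersion.WithResource("builds")
//	generic, err := factory.ForResource(gvr)
//	if err != nil {
//		return err
//	}
//	informer := generic.Informer() // cache.SharedIndexInformer
//	lister := generic.Lister()     // cache.GenericLister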
+func NewBuildLister(indexer cache.Indexer) BuildLister { + return &buildLister{listers.New[*buildv1.Build](indexer, buildv1.Resource("build"))} +} + +// Builds returns an object that can list and get Builds. +func (s *buildLister) Builds(namespace string) BuildNamespaceLister { + return buildNamespaceLister{listers.NewNamespaced[*buildv1.Build](s.ResourceIndexer, namespace)} +} + +// BuildNamespaceLister helps list and get Builds. +// All objects returned here must be treated as read-only. +type BuildNamespaceLister interface { + // List lists all Builds in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*buildv1.Build, err error) + // Get retrieves the Build from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*buildv1.Build, error) + BuildNamespaceListerExpansion +} + +// buildNamespaceLister implements the BuildNamespaceLister +// interface. +type buildNamespaceLister struct { + listers.ResourceIndexer[*buildv1.Build] +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go new file mode 100644 index 0000000000000..96de9d5e2ff89 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// BuildConfigLister helps list BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigLister interface { + // List lists all BuildConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*buildv1.BuildConfig, err error) + // BuildConfigs returns an object that can list and get BuildConfigs. + BuildConfigs(namespace string) BuildConfigNamespaceLister + BuildConfigListerExpansion +} + +// buildConfigLister implements the BuildConfigLister interface. +type buildConfigLister struct { + listers.ResourceIndexer[*buildv1.BuildConfig] +} + +// NewBuildConfigLister returns a new BuildConfigLister. +func NewBuildConfigLister(indexer cache.Indexer) BuildConfigLister { + return &buildConfigLister{listers.New[*buildv1.BuildConfig](indexer, buildv1.Resource("buildconfig"))} +} + +// BuildConfigs returns an object that can list and get BuildConfigs. +func (s *buildConfigLister) BuildConfigs(namespace string) BuildConfigNamespaceLister { + return buildConfigNamespaceLister{listers.NewNamespaced[*buildv1.BuildConfig](s.ResourceIndexer, namespace)} +} + +// BuildConfigNamespaceLister helps list and get BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigNamespaceLister interface { + // List lists all BuildConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*buildv1.BuildConfig, err error) + // Get retrieves the BuildConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*buildv1.BuildConfig, error) + BuildConfigNamespaceListerExpansion +} + +// buildConfigNamespaceLister implements the BuildConfigNamespaceLister +// interface. 
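// Illustrative sketch, not generated code: reading BuildConfigs back out of an
// informer's indexer through the lister types above. The namespace and label
// selector are hypothetical.
//
//	lister := NewBuildConfigLister(informer.GetIndexer())
//	sel, err := labels.Parse("app=my-app")
//	if err != nil {
//		return err
//	}
//	configs, err := lister.BuildConfigs("my-namespace").List(sel)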
+type buildConfigNamespaceLister struct { + listers.ResourceIndexer[*buildv1.BuildConfig] +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go new file mode 100644 index 0000000000000..1fc9faecdd2df --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go @@ -0,0 +1,19 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BuildListerExpansion allows custom methods to be added to +// BuildLister. +type BuildListerExpansion interface{} + +// BuildNamespaceListerExpansion allows custom methods to be added to +// BuildNamespaceLister. +type BuildNamespaceListerExpansion interface{} + +// BuildConfigListerExpansion allows custom methods to be added to +// BuildConfigLister. +type BuildConfigListerExpansion interface{} + +// BuildConfigNamespaceListerExpansion allows custom methods to be added to +// BuildConfigNamespaceLister. +type BuildConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go new file mode 100644 index 0000000000000..e763d14f6e33d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AlibabaCloudPlatformStatusApplyConfiguration represents a declarative configuration of the AlibabaCloudPlatformStatus type for use +// with apply. +type AlibabaCloudPlatformStatusApplyConfiguration struct { + Region *string `json:"region,omitempty"` + ResourceGroupID *string `json:"resourceGroupID,omitempty"` + ResourceTags []AlibabaCloudResourceTagApplyConfiguration `json:"resourceTags,omitempty"` +} + +// AlibabaCloudPlatformStatusApplyConfiguration constructs a declarative configuration of the AlibabaCloudPlatformStatus type for use with +// apply. +func AlibabaCloudPlatformStatus() *AlibabaCloudPlatformStatusApplyConfiguration { + return &AlibabaCloudPlatformStatusApplyConfiguration{} +} + +// WithRegion sets the Region field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Region field is set to the value of the last call. +func (b *AlibabaCloudPlatformStatusApplyConfiguration) WithRegion(value string) *AlibabaCloudPlatformStatusApplyConfiguration { + b.Region = &value + return b +} + +// WithResourceGroupID sets the ResourceGroupID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceGroupID field is set to the value of the last call. +func (b *AlibabaCloudPlatformStatusApplyConfiguration) WithResourceGroupID(value string) *AlibabaCloudPlatformStatusApplyConfiguration { + b.ResourceGroupID = &value + return b +} + +// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *AlibabaCloudPlatformStatusApplyConfiguration) WithResourceTags(values ...*AlibabaCloudResourceTagApplyConfiguration) *AlibabaCloudPlatformStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceTags") + } + b.ResourceTags = append(b.ResourceTags, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go new file mode 100644 index 0000000000000..38fef6d50a30c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AlibabaCloudResourceTagApplyConfiguration represents a declarative configuration of the AlibabaCloudResourceTag type for use +// with apply. +type AlibabaCloudResourceTagApplyConfiguration struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` +} + +// AlibabaCloudResourceTagApplyConfiguration constructs a declarative configuration of the AlibabaCloudResourceTag type for use with +// apply. +func AlibabaCloudResourceTag() *AlibabaCloudResourceTagApplyConfiguration { + return &AlibabaCloudResourceTagApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AlibabaCloudResourceTagApplyConfiguration) WithKey(value string) *AlibabaCloudResourceTagApplyConfiguration { + b.Key = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *AlibabaCloudResourceTagApplyConfiguration) WithValue(value string) *AlibabaCloudResourceTagApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go new file mode 100644 index 0000000000000..0d2c3e4f8d3ae --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// APIServerApplyConfiguration represents a declarative configuration of the APIServer type for use +// with apply. 
+type APIServerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *APIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.APIServerStatus `json:"status,omitempty"` +} + +// APIServer constructs a declarative configuration of the APIServer type for use with +// apply. +func APIServer(name string) *APIServerApplyConfiguration { + b := &APIServerApplyConfiguration{} + b.WithName(name) + b.WithKind("APIServer") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractAPIServer extracts the applied configuration owned by fieldManager from +// aPIServer. If no managedFields are found in aPIServer for fieldManager, an +// APIServerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// aPIServer must be an unmodified APIServer API object that was retrieved from the Kubernetes API. +// ExtractAPIServer provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractAPIServer(aPIServer *configv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) { + return extractAPIServer(aPIServer, fieldManager, "") +} + +// ExtractAPIServerStatus is the same as ExtractAPIServer except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractAPIServerStatus(aPIServer *configv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) { + return extractAPIServer(aPIServer, fieldManager, "status") +} + +func extractAPIServer(aPIServer *configv1.APIServer, fieldManager string, subresource string) (*APIServerApplyConfiguration, error) { + b := &APIServerApplyConfiguration{} + err := managedfields.ExtractInto(aPIServer, internal.Parser().Type("com.github.openshift.api.config.v1.APIServer"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(aPIServer.Name) + + b.WithKind("APIServer") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithKind(value string) *APIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithAPIVersion(value string) *APIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithName(value string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithGenerateName(value string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithNamespace(value string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithUID(value types.UID) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithResourceVersion(value string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithGeneration(value int64) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *APIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *APIServerApplyConfiguration) WithLabels(entries map[string]string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *APIServerApplyConfiguration) WithAnnotations(entries map[string]string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *APIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *APIServerApplyConfiguration) WithFinalizers(values ...string) *APIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *APIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithSpec(value *APIServerSpecApplyConfiguration) *APIServerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *APIServerApplyConfiguration) WithStatus(value configv1.APIServerStatus) *APIServerApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *APIServerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go new file mode 100644 index 0000000000000..6f0deb125a313 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// APIServerEncryptionApplyConfiguration represents a declarative configuration of the APIServerEncryption type for use +// with apply. +type APIServerEncryptionApplyConfiguration struct { + Type *configv1.EncryptionType `json:"type,omitempty"` +} + +// APIServerEncryptionApplyConfiguration constructs a declarative configuration of the APIServerEncryption type for use with +// apply.
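// Illustrative sketch, not generated code: combining the fluent setters above
// into a server-side apply call. The configclient package
// (github.com/openshift/client-go/config/clientset/versioned), its Apply
// method, and the field-manager name are assumptions; "cluster" is the
// singleton APIServer object's name.
//
//	apply := APIServer("cluster").WithSpec(
//		APIServerSpec().WithAdditionalCORSAllowedOrigins(`//my-origin\.example\.com(:|\z)`))
//	obj, err := client.ConfigV1().APIServers().Apply(ctx, apply,
//		apismetav1.ApplyOptions{FieldManager: "my-operator", Force: true})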
+func APIServerEncryption() *APIServerEncryptionApplyConfiguration { + return &APIServerEncryptionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *APIServerEncryptionApplyConfiguration) WithType(value configv1.EncryptionType) *APIServerEncryptionApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go new file mode 100644 index 0000000000000..ae1f76215d75b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go @@ -0,0 +1,34 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// APIServerNamedServingCertApplyConfiguration represents a declarative configuration of the APIServerNamedServingCert type for use +// with apply. +type APIServerNamedServingCertApplyConfiguration struct { + Names []string `json:"names,omitempty"` + ServingCertificate *SecretNameReferenceApplyConfiguration `json:"servingCertificate,omitempty"` +} + +// APIServerNamedServingCertApplyConfiguration constructs a declarative configuration of the APIServerNamedServingCert type for use with +// apply. +func APIServerNamedServingCert() *APIServerNamedServingCertApplyConfiguration { + return &APIServerNamedServingCertApplyConfiguration{} +} + +// WithNames adds the given value to the Names field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Names field. +func (b *APIServerNamedServingCertApplyConfiguration) WithNames(values ...string) *APIServerNamedServingCertApplyConfiguration { + for i := range values { + b.Names = append(b.Names, values[i]) + } + return b +} + +// WithServingCertificate sets the ServingCertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServingCertificate field is set to the value of the last call. +func (b *APIServerNamedServingCertApplyConfiguration) WithServingCertificate(value *SecretNameReferenceApplyConfiguration) *APIServerNamedServingCertApplyConfiguration { + b.ServingCertificate = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go new file mode 100644 index 0000000000000..963bea3053b11 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// APIServerServingCertsApplyConfiguration represents a declarative configuration of the APIServerServingCerts type for use +// with apply.
+type APIServerServingCertsApplyConfiguration struct { + NamedCertificates []APIServerNamedServingCertApplyConfiguration `json:"namedCertificates,omitempty"` +} + +// APIServerServingCertsApplyConfiguration constructs a declarative configuration of the APIServerServingCerts type for use with +// apply. +func APIServerServingCerts() *APIServerServingCertsApplyConfiguration { + return &APIServerServingCertsApplyConfiguration{} +} + +// WithNamedCertificates adds the given value to the NamedCertificates field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NamedCertificates field. +func (b *APIServerServingCertsApplyConfiguration) WithNamedCertificates(values ...*APIServerNamedServingCertApplyConfiguration) *APIServerServingCertsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNamedCertificates") + } + b.NamedCertificates = append(b.NamedCertificates, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go new file mode 100644 index 0000000000000..58f4b0eece5d1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go @@ -0,0 +1,70 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// APIServerSpecApplyConfiguration represents a declarative configuration of the APIServerSpec type for use +// with apply. +type APIServerSpecApplyConfiguration struct { + ServingCerts *APIServerServingCertsApplyConfiguration `json:"servingCerts,omitempty"` + ClientCA *ConfigMapNameReferenceApplyConfiguration `json:"clientCA,omitempty"` + AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` + Encryption *APIServerEncryptionApplyConfiguration `json:"encryption,omitempty"` + TLSSecurityProfile *TLSSecurityProfileApplyConfiguration `json:"tlsSecurityProfile,omitempty"` + Audit *AuditApplyConfiguration `json:"audit,omitempty"` +} + +// APIServerSpecApplyConfiguration constructs a declarative configuration of the APIServerSpec type for use with +// apply. +func APIServerSpec() *APIServerSpecApplyConfiguration { + return &APIServerSpecApplyConfiguration{} +} + +// WithServingCerts sets the ServingCerts field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServingCerts field is set to the value of the last call. +func (b *APIServerSpecApplyConfiguration) WithServingCerts(value *APIServerServingCertsApplyConfiguration) *APIServerSpecApplyConfiguration { + b.ServingCerts = value + return b +} + +// WithClientCA sets the ClientCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientCA field is set to the value of the last call.
+func (b *APIServerSpecApplyConfiguration) WithClientCA(value *ConfigMapNameReferenceApplyConfiguration) *APIServerSpecApplyConfiguration { + b.ClientCA = value + return b +} + +// WithAdditionalCORSAllowedOrigins adds the given value to the AdditionalCORSAllowedOrigins field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalCORSAllowedOrigins field. +func (b *APIServerSpecApplyConfiguration) WithAdditionalCORSAllowedOrigins(values ...string) *APIServerSpecApplyConfiguration { + for i := range values { + b.AdditionalCORSAllowedOrigins = append(b.AdditionalCORSAllowedOrigins, values[i]) + } + return b +} + +// WithEncryption sets the Encryption field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Encryption field is set to the value of the last call. +func (b *APIServerSpecApplyConfiguration) WithEncryption(value *APIServerEncryptionApplyConfiguration) *APIServerSpecApplyConfiguration { + b.Encryption = value + return b +} + +// WithTLSSecurityProfile sets the TLSSecurityProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSSecurityProfile field is set to the value of the last call. +func (b *APIServerSpecApplyConfiguration) WithTLSSecurityProfile(value *TLSSecurityProfileApplyConfiguration) *APIServerSpecApplyConfiguration { + b.TLSSecurityProfile = value + return b +} + +// WithAudit sets the Audit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Audit field is set to the value of the last call. +func (b *APIServerSpecApplyConfiguration) WithAudit(value *AuditApplyConfiguration) *APIServerSpecApplyConfiguration { + b.Audit = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go new file mode 100644 index 0000000000000..a07c9788c3852 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// AuditApplyConfiguration represents a declarative configuration of the Audit type for use +// with apply. +type AuditApplyConfiguration struct { + Profile *configv1.AuditProfileType `json:"profile,omitempty"` + CustomRules []AuditCustomRuleApplyConfiguration `json:"customRules,omitempty"` +} + +// AuditApplyConfiguration constructs a declarative configuration of the Audit type for use with +// apply. +func Audit() *AuditApplyConfiguration { + return &AuditApplyConfiguration{} +} + +// WithProfile sets the Profile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Profile field is set to the value of the last call.
+func (b *AuditApplyConfiguration) WithProfile(value configv1.AuditProfileType) *AuditApplyConfiguration { + b.Profile = &value + return b +} + +// WithCustomRules adds the given value to the CustomRules field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CustomRules field. +func (b *AuditApplyConfiguration) WithCustomRules(values ...*AuditCustomRuleApplyConfiguration) *AuditApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithCustomRules") + } + b.CustomRules = append(b.CustomRules, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go new file mode 100644 index 0000000000000..33a696d77f087 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// AuditCustomRuleApplyConfiguration represents a declarative configuration of the AuditCustomRule type for use +// with apply. +type AuditCustomRuleApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Profile *configv1.AuditProfileType `json:"profile,omitempty"` +} + +// AuditCustomRuleApplyConfiguration constructs a declarative configuration of the AuditCustomRule type for use with +// apply. +func AuditCustomRule() *AuditCustomRuleApplyConfiguration { + return &AuditCustomRuleApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *AuditCustomRuleApplyConfiguration) WithGroup(value string) *AuditCustomRuleApplyConfiguration { + b.Group = &value + return b +} + +// WithProfile sets the Profile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Profile field is set to the value of the last call. +func (b *AuditCustomRuleApplyConfiguration) WithProfile(value configv1.AuditProfileType) *AuditCustomRuleApplyConfiguration { + b.Profile = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go new file mode 100644 index 0000000000000..6ae8497a5b03a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// AuthenticationApplyConfiguration represents a declarative configuration of the Authentication type for use
+// with apply.
+type AuthenticationApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *AuthenticationSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *AuthenticationStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Authentication constructs a declarative configuration of the Authentication type for use with
+// apply.
+func Authentication(name string) *AuthenticationApplyConfiguration {
+	b := &AuthenticationApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Authentication")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractAuthentication extracts the applied configuration owned by fieldManager from
+// authentication. If no managedFields are found in authentication for fieldManager, an
+// AuthenticationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// authentication must be an unmodified Authentication API object that was retrieved from the Kubernetes API.
+// ExtractAuthentication provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractAuthentication(authentication *configv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) {
+	return extractAuthentication(authentication, fieldManager, "")
+}
+
+// ExtractAuthenticationStatus is the same as ExtractAuthentication except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractAuthenticationStatus(authentication *configv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) {
+	return extractAuthentication(authentication, fieldManager, "status")
+}
+
+func extractAuthentication(authentication *configv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) {
+	b := &AuthenticationApplyConfiguration{}
+	err := managedfields.ExtractInto(authentication, internal.Parser().Type("com.github.openshift.api.config.v1.Authentication"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(authentication.Name)
+
+	b.WithKind("Authentication")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithKind(value string) *AuthenticationApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *AuthenticationApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithName(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *AuthenticationApplyConfiguration) WithGeneration(value int64) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
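+// For example, successive calls merge by key (a sketch of the documented
+// semantics, not part of the generated file):
+//
+//	b := Authentication("cluster").
+//		WithAnnotations(map[string]string{"a": "1", "b": "2"}).
+//		WithAnnotations(map[string]string{"a": "3"})
+//	// b now carries the annotations {"a": "3", "b": "2"}.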
+func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]string) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *AuthenticationApplyConfiguration) WithFinalizers(values ...string) *AuthenticationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *AuthenticationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithSpec(value *AuthenticationSpecApplyConfiguration) *AuthenticationApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatusApplyConfiguration) *AuthenticationApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *AuthenticationApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go
new file mode 100644
index 0000000000000..b2ac362786cd6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go
@@ -0,0 +1,82 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// AuthenticationSpecApplyConfiguration represents a declarative configuration of the AuthenticationSpec type for use
+// with apply.
+type AuthenticationSpecApplyConfiguration struct {
+	Type                       *configv1.AuthenticationType                            `json:"type,omitempty"`
+	OAuthMetadata              *ConfigMapNameReferenceApplyConfiguration               `json:"oauthMetadata,omitempty"`
+	WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticators,omitempty"`
+	WebhookTokenAuthenticator  *WebhookTokenAuthenticatorApplyConfiguration            `json:"webhookTokenAuthenticator,omitempty"`
+	ServiceAccountIssuer       *string                                                 `json:"serviceAccountIssuer,omitempty"`
+	OIDCProviders              []OIDCProviderApplyConfiguration                        `json:"oidcProviders,omitempty"`
+}
+
+// AuthenticationSpecApplyConfiguration constructs a declarative configuration of the AuthenticationSpec type for use with
+// apply.
+func AuthenticationSpec() *AuthenticationSpecApplyConfiguration {
+	return &AuthenticationSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithType(value configv1.AuthenticationType) *AuthenticationSpecApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithOAuthMetadata sets the OAuthMetadata field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OAuthMetadata field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithOAuthMetadata(value *ConfigMapNameReferenceApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+	b.OAuthMetadata = value
+	return b
+}
+
+// WithWebhookTokenAuthenticators adds the given value to the WebhookTokenAuthenticators field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the WebhookTokenAuthenticators field.
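+// Each value must be a non-nil pointer; a nil entry causes a panic. For example
+// (a sketch; the DeprecatedWebhookTokenAuthenticator constructor is assumed to
+// exist in this package, following the generator's naming convention):
+//
+//	spec := AuthenticationSpec().
+//		WithWebhookTokenAuthenticators(DeprecatedWebhookTokenAuthenticator())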
+func (b *AuthenticationSpecApplyConfiguration) WithWebhookTokenAuthenticators(values ...*DeprecatedWebhookTokenAuthenticatorApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithWebhookTokenAuthenticators")
+		}
+		b.WebhookTokenAuthenticators = append(b.WebhookTokenAuthenticators, *values[i])
+	}
+	return b
+}
+
+// WithWebhookTokenAuthenticator sets the WebhookTokenAuthenticator field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WebhookTokenAuthenticator field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithWebhookTokenAuthenticator(value *WebhookTokenAuthenticatorApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+	b.WebhookTokenAuthenticator = value
+	return b
+}
+
+// WithServiceAccountIssuer sets the ServiceAccountIssuer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceAccountIssuer field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithServiceAccountIssuer(value string) *AuthenticationSpecApplyConfiguration {
+	b.ServiceAccountIssuer = &value
+	return b
+}
+
+// WithOIDCProviders adds the given value to the OIDCProviders field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OIDCProviders field.
+func (b *AuthenticationSpecApplyConfiguration) WithOIDCProviders(values ...*OIDCProviderApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOIDCProviders")
+		}
+		b.OIDCProviders = append(b.OIDCProviders, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go
new file mode 100644
index 0000000000000..1539f164b4ba9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AuthenticationStatusApplyConfiguration represents a declarative configuration of the AuthenticationStatus type for use
+// with apply.
+type AuthenticationStatusApplyConfiguration struct {
+	IntegratedOAuthMetadata *ConfigMapNameReferenceApplyConfiguration `json:"integratedOAuthMetadata,omitempty"`
+	OIDCClients             []OIDCClientStatusApplyConfiguration      `json:"oidcClients,omitempty"`
+}
+
+// AuthenticationStatusApplyConfiguration constructs a declarative configuration of the AuthenticationStatus type for use with
+// apply.
+func AuthenticationStatus() *AuthenticationStatusApplyConfiguration {
+	return &AuthenticationStatusApplyConfiguration{}
+}
+
+// WithIntegratedOAuthMetadata sets the IntegratedOAuthMetadata field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IntegratedOAuthMetadata field is set to the value of the last call.
+func (b *AuthenticationStatusApplyConfiguration) WithIntegratedOAuthMetadata(value *ConfigMapNameReferenceApplyConfiguration) *AuthenticationStatusApplyConfiguration {
+	b.IntegratedOAuthMetadata = value
+	return b
+}
+
+// WithOIDCClients adds the given value to the OIDCClients field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OIDCClients field.
+func (b *AuthenticationStatusApplyConfiguration) WithOIDCClients(values ...*OIDCClientStatusApplyConfiguration) *AuthenticationStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOIDCClients")
+		}
+		b.OIDCClients = append(b.OIDCClients, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go
new file mode 100644
index 0000000000000..8ad662e23c9c3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSDNSSpecApplyConfiguration represents a declarative configuration of the AWSDNSSpec type for use
+// with apply.
+type AWSDNSSpecApplyConfiguration struct {
+	PrivateZoneIAMRole *string `json:"privateZoneIAMRole,omitempty"`
+}
+
+// AWSDNSSpecApplyConfiguration constructs a declarative configuration of the AWSDNSSpec type for use with
+// apply.
+func AWSDNSSpec() *AWSDNSSpecApplyConfiguration {
+	return &AWSDNSSpecApplyConfiguration{}
+}
+
+// WithPrivateZoneIAMRole sets the PrivateZoneIAMRole field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrivateZoneIAMRole field is set to the value of the last call.
+func (b *AWSDNSSpecApplyConfiguration) WithPrivateZoneIAMRole(value string) *AWSDNSSpecApplyConfiguration {
+	b.PrivateZoneIAMRole = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go
new file mode 100644
index 0000000000000..e67e671117c7f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// AWSIngressSpecApplyConfiguration represents a declarative configuration of the AWSIngressSpec type for use
+// with apply.
+type AWSIngressSpecApplyConfiguration struct {
+	Type *configv1.AWSLBType `json:"type,omitempty"`
+}
+
+// AWSIngressSpecApplyConfiguration constructs a declarative configuration of the AWSIngressSpec type for use with
+// apply.
+func AWSIngressSpec() *AWSIngressSpecApplyConfiguration {
+	return &AWSIngressSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *AWSIngressSpecApplyConfiguration) WithType(value configv1.AWSLBType) *AWSIngressSpecApplyConfiguration {
+	b.Type = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go
new file mode 100644
index 0000000000000..85361e7a2ced6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSPlatformSpecApplyConfiguration represents a declarative configuration of the AWSPlatformSpec type for use
+// with apply.
+type AWSPlatformSpecApplyConfiguration struct {
+	ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformSpecApplyConfiguration constructs a declarative configuration of the AWSPlatformSpec type for use with
+// apply.
+func AWSPlatformSpec() *AWSPlatformSpecApplyConfiguration {
+	return &AWSPlatformSpecApplyConfiguration{}
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *AWSPlatformSpecApplyConfiguration) WithServiceEndpoints(values ...*AWSServiceEndpointApplyConfiguration) *AWSPlatformSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithServiceEndpoints")
+		}
+		b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go
new file mode 100644
index 0000000000000..b217e5bdcd445
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go
@@ -0,0 +1,60 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSPlatformStatusApplyConfiguration represents a declarative configuration of the AWSPlatformStatus type for use
+// with apply.
+type AWSPlatformStatusApplyConfiguration struct {
+	Region                  *string                                    `json:"region,omitempty"`
+	ServiceEndpoints        []AWSServiceEndpointApplyConfiguration     `json:"serviceEndpoints,omitempty"`
+	ResourceTags            []AWSResourceTagApplyConfiguration         `json:"resourceTags,omitempty"`
+	CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"`
+}
+
+// AWSPlatformStatusApplyConfiguration constructs a declarative configuration of the AWSPlatformStatus type for use with
+// apply.
+func AWSPlatformStatus() *AWSPlatformStatusApplyConfiguration {
+	return &AWSPlatformStatusApplyConfiguration{}
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *AWSPlatformStatusApplyConfiguration) WithRegion(value string) *AWSPlatformStatusApplyConfiguration {
+	b.Region = &value
+	return b
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *AWSPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*AWSServiceEndpointApplyConfiguration) *AWSPlatformStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithServiceEndpoints")
+		}
+		b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+	}
+	return b
+}
+
+// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *AWSPlatformStatusApplyConfiguration) WithResourceTags(values ...*AWSResourceTagApplyConfiguration) *AWSPlatformStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithResourceTags")
+		}
+		b.ResourceTags = append(b.ResourceTags, *values[i])
+	}
+	return b
+}
+
+// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call.
+func (b *AWSPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *AWSPlatformStatusApplyConfiguration {
+	b.CloudLoadBalancerConfig = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
new file mode 100644
index 0000000000000..766157a0732c7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSResourceTagApplyConfiguration represents a declarative configuration of the AWSResourceTag type for use
+// with apply.
+type AWSResourceTagApplyConfiguration struct {
+	Key   *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// AWSResourceTagApplyConfiguration constructs a declarative configuration of the AWSResourceTag type for use with
+// apply.
+func AWSResourceTag() *AWSResourceTagApplyConfiguration {
+	return &AWSResourceTagApplyConfiguration{}
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *AWSResourceTagApplyConfiguration) WithKey(value string) *AWSResourceTagApplyConfiguration {
+	b.Key = &value
+	return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call. +func (b *AWSResourceTagApplyConfiguration) WithValue(value string) *AWSResourceTagApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go new file mode 100644 index 0000000000000..5d4f38882ed5c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AWSServiceEndpointApplyConfiguration represents a declarative configuration of the AWSServiceEndpoint type for use +// with apply. +type AWSServiceEndpointApplyConfiguration struct { + Name *string `json:"name,omitempty"` + URL *string `json:"url,omitempty"` +} + +// AWSServiceEndpointApplyConfiguration constructs a declarative configuration of the AWSServiceEndpoint type for use with +// apply. +func AWSServiceEndpoint() *AWSServiceEndpointApplyConfiguration { + return &AWSServiceEndpointApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AWSServiceEndpointApplyConfiguration) WithName(value string) *AWSServiceEndpointApplyConfiguration { + b.Name = &value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *AWSServiceEndpointApplyConfiguration) WithURL(value string) *AWSServiceEndpointApplyConfiguration { + b.URL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go new file mode 100644 index 0000000000000..5348a3c99ff4f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// AzurePlatformStatusApplyConfiguration represents a declarative configuration of the AzurePlatformStatus type for use +// with apply. +type AzurePlatformStatusApplyConfiguration struct { + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"` + CloudName *configv1.AzureCloudEnvironment `json:"cloudName,omitempty"` + ARMEndpoint *string `json:"armEndpoint,omitempty"` + ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"` +} + +// AzurePlatformStatusApplyConfiguration constructs a declarative configuration of the AzurePlatformStatus type for use with +// apply. 
+func AzurePlatformStatus() *AzurePlatformStatusApplyConfiguration {
+	return &AzurePlatformStatusApplyConfiguration{}
+}
+
+// WithResourceGroupName sets the ResourceGroupName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroupName field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithResourceGroupName(value string) *AzurePlatformStatusApplyConfiguration {
+	b.ResourceGroupName = &value
+	return b
+}
+
+// WithNetworkResourceGroupName sets the NetworkResourceGroupName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkResourceGroupName field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithNetworkResourceGroupName(value string) *AzurePlatformStatusApplyConfiguration {
+	b.NetworkResourceGroupName = &value
+	return b
+}
+
+// WithCloudName sets the CloudName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudName field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithCloudName(value configv1.AzureCloudEnvironment) *AzurePlatformStatusApplyConfiguration {
+	b.CloudName = &value
+	return b
+}
+
+// WithARMEndpoint sets the ARMEndpoint field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ARMEndpoint field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithARMEndpoint(value string) *AzurePlatformStatusApplyConfiguration {
+	b.ARMEndpoint = &value
+	return b
+}
+
+// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *AzurePlatformStatusApplyConfiguration) WithResourceTags(values ...*AzureResourceTagApplyConfiguration) *AzurePlatformStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithResourceTags")
+		}
+		b.ResourceTags = append(b.ResourceTags, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
new file mode 100644
index 0000000000000..980d2a1684082
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AzureResourceTagApplyConfiguration represents a declarative configuration of the AzureResourceTag type for use
+// with apply.
+type AzureResourceTagApplyConfiguration struct {
+	Key   *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// AzureResourceTagApplyConfiguration constructs a declarative configuration of the AzureResourceTag type for use with
+// apply.
+func AzureResourceTag() *AzureResourceTagApplyConfiguration { + return &AzureResourceTagApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AzureResourceTagApplyConfiguration) WithKey(value string) *AzureResourceTagApplyConfiguration { + b.Key = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *AzureResourceTagApplyConfiguration) WithValue(value string) *AzureResourceTagApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go new file mode 100644 index 0000000000000..4a7405ad89756 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// BareMetalPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the BareMetalPlatformLoadBalancer type for use +// with apply. +type BareMetalPlatformLoadBalancerApplyConfiguration struct { + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// BareMetalPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the BareMetalPlatformLoadBalancer type for use with +// apply. +func BareMetalPlatformLoadBalancer() *BareMetalPlatformLoadBalancerApplyConfiguration { + return &BareMetalPlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BareMetalPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *BareMetalPlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go new file mode 100644 index 0000000000000..81d8087751fd8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// BareMetalPlatformSpecApplyConfiguration represents a declarative configuration of the BareMetalPlatformSpec type for use +// with apply. 
+type BareMetalPlatformSpecApplyConfiguration struct {
+	APIServerInternalIPs []configv1.IP   `json:"apiServerInternalIPs,omitempty"`
+	IngressIPs           []configv1.IP   `json:"ingressIPs,omitempty"`
+	MachineNetworks      []configv1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// BareMetalPlatformSpecApplyConfiguration constructs a declarative configuration of the BareMetalPlatformSpec type for use with
+// apply.
+func BareMetalPlatformSpec() *BareMetalPlatformSpecApplyConfiguration {
+	return &BareMetalPlatformSpecApplyConfiguration{}
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *BareMetalPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...configv1.IP) *BareMetalPlatformSpecApplyConfiguration {
+	for i := range values {
+		b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+	}
+	return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *BareMetalPlatformSpecApplyConfiguration) WithIngressIPs(values ...configv1.IP) *BareMetalPlatformSpecApplyConfiguration {
+	for i := range values {
+		b.IngressIPs = append(b.IngressIPs, values[i])
+	}
+	return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *BareMetalPlatformSpecApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *BareMetalPlatformSpecApplyConfiguration {
+	for i := range values {
+		b.MachineNetworks = append(b.MachineNetworks, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
new file mode 100644
index 0000000000000..55b875c7c469c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
@@ -0,0 +1,87 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// BareMetalPlatformStatusApplyConfiguration represents a declarative configuration of the BareMetalPlatformStatus type for use
+// with apply.
+type BareMetalPlatformStatusApplyConfiguration struct {
+	APIServerInternalIP  *string                                          `json:"apiServerInternalIP,omitempty"`
+	APIServerInternalIPs []string                                         `json:"apiServerInternalIPs,omitempty"`
+	IngressIP            *string                                          `json:"ingressIP,omitempty"`
+	IngressIPs           []string                                         `json:"ingressIPs,omitempty"`
+	NodeDNSIP            *string                                          `json:"nodeDNSIP,omitempty"`
+	LoadBalancer         *BareMetalPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+	MachineNetworks      []configv1.CIDR                                  `json:"machineNetworks,omitempty"`
+}
+
+// BareMetalPlatformStatusApplyConfiguration constructs a declarative configuration of the BareMetalPlatformStatus type for use with
+// apply.
+func BareMetalPlatformStatus() *BareMetalPlatformStatusApplyConfiguration {
+	return &BareMetalPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *BareMetalPlatformStatusApplyConfiguration {
+	b.APIServerInternalIP = &value
+	return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *BareMetalPlatformStatusApplyConfiguration {
+	for i := range values {
+		b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+	}
+	return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithIngressIP(value string) *BareMetalPlatformStatusApplyConfiguration {
+	b.IngressIP = &value
+	return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *BareMetalPlatformStatusApplyConfiguration {
+	for i := range values {
+		b.IngressIPs = append(b.IngressIPs, values[i])
+	}
+	return b
+}
+
+// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeDNSIP field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *BareMetalPlatformStatusApplyConfiguration {
+	b.NodeDNSIP = &value
+	return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithLoadBalancer(value *BareMetalPlatformLoadBalancerApplyConfiguration) *BareMetalPlatformStatusApplyConfiguration {
+	b.LoadBalancer = value
+	return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *BareMetalPlatformStatusApplyConfiguration {
+	for i := range values {
+		b.MachineNetworks = append(b.MachineNetworks, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
new file mode 100644
index 0000000000000..88f30314dfeb3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BasicAuthIdentityProviderApplyConfiguration represents a declarative configuration of the BasicAuthIdentityProvider type for use
+// with apply.
+type BasicAuthIdentityProviderApplyConfiguration struct {
+	OAuthRemoteConnectionInfoApplyConfiguration `json:",inline"`
+}
+
+// BasicAuthIdentityProviderApplyConfiguration constructs a declarative configuration of the BasicAuthIdentityProvider type for use with
+// apply.
+func BasicAuthIdentityProvider() *BasicAuthIdentityProviderApplyConfiguration {
+	return &BasicAuthIdentityProviderApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithURL(value string) *BasicAuthIdentityProviderApplyConfiguration {
+	b.OAuthRemoteConnectionInfoApplyConfiguration.URL = &value
+	return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration {
+	b.OAuthRemoteConnectionInfoApplyConfiguration.CA = value
+	return b
+}
+
+// WithTLSClientCert sets the TLSClientCert field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientCert field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration {
+	b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientCert = value
+	return b
+}
+
+// WithTLSClientKey sets the TLSClientKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientKey field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration {
+	b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientKey = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
new file mode 100644
index 0000000000000..cdadabcae7877
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
@@ -0,0 +1,237 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// BuildApplyConfiguration represents a declarative configuration of the Build type for use
+// with apply.
+type BuildApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *BuildSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// Build constructs a declarative configuration of the Build type for use with
+// apply.
+func Build(name string) *BuildApplyConfiguration {
+	b := &BuildApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Build")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractBuild extracts the applied configuration owned by fieldManager from
+// build. If no managedFields are found in build for fieldManager, a
+// BuildApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// build must be an unmodified Build API object that was retrieved from the Kubernetes API.
+// ExtractBuild provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractBuild(build *configv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
+	return extractBuild(build, fieldManager, "")
+}
+
+// ExtractBuildStatus is the same as ExtractBuild except
+// that it extracts the status subresource applied configuration.
+// Experimental!
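+//
+// A typical extract/modify/apply round-trip, as a sketch (the typed client and
+// its Apply method come from the generated clientset and are assumed here, as
+// is the BuildSpec constructor from this package):
+//
+//	got, err := client.ConfigV1().Builds().Get(ctx, "cluster", apismetav1.GetOptions{})
+//	// handle err
+//	ac, err := ExtractBuild(got, "my-manager")
+//	// handle err, then set only the fields this manager should own
+//	ac.WithSpec(BuildSpec())
+//	_, err = client.ConfigV1().Builds().Apply(ctx, ac, apismetav1.ApplyOptions{
+//		FieldManager: "my-manager",
+//		Force:        true,
+//	})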
+func ExtractBuildStatus(build *configv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { + return extractBuild(build, fieldManager, "status") +} + +func extractBuild(build *configv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) { + b := &BuildApplyConfiguration{} + err := managedfields.ExtractInto(build, internal.Parser().Type("com.github.openshift.api.config.v1.Build"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(build.Name) + + b.WithKind("Build") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithGeneration(value int64) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *BuildApplyConfiguration) WithFinalizers(values ...string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *BuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) *BuildApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *BuildApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go new file mode 100644 index 0000000000000..ece9244191b41 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go @@ -0,0 +1,70 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// BuildDefaultsApplyConfiguration represents a declarative configuration of the BuildDefaults type for use +// with apply. +type BuildDefaultsApplyConfiguration struct { + DefaultProxy *ProxySpecApplyConfiguration `json:"defaultProxy,omitempty"` + GitProxy *ProxySpecApplyConfiguration `json:"gitProxy,omitempty"` + Env []corev1.EnvVar `json:"env,omitempty"` + ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// BuildDefaultsApplyConfiguration constructs a declarative configuration of the BuildDefaults type for use with +// apply. +func BuildDefaults() *BuildDefaultsApplyConfiguration { + return &BuildDefaultsApplyConfiguration{} +} + +// WithDefaultProxy sets the DefaultProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultProxy field is set to the value of the last call. +func (b *BuildDefaultsApplyConfiguration) WithDefaultProxy(value *ProxySpecApplyConfiguration) *BuildDefaultsApplyConfiguration { + b.DefaultProxy = value + return b +} + +// WithGitProxy sets the GitProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitProxy field is set to the value of the last call. +func (b *BuildDefaultsApplyConfiguration) WithGitProxy(value *ProxySpecApplyConfiguration) *BuildDefaultsApplyConfiguration { + b.GitProxy = value + return b +} + +// WithEnv adds the given value to the Env field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Env field. +func (b *BuildDefaultsApplyConfiguration) WithEnv(values ...corev1.EnvVar) *BuildDefaultsApplyConfiguration { + for i := range values { + b.Env = append(b.Env, values[i]) + } + return b +} + +// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImageLabels field. 
+func (b *BuildDefaultsApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildDefaultsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithImageLabels") + } + b.ImageLabels = append(b.ImageLabels, *values[i]) + } + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *BuildDefaultsApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildDefaultsApplyConfiguration { + b.Resources = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go new file mode 100644 index 0000000000000..948bc9e8a2e08 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go @@ -0,0 +1,67 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// BuildOverridesApplyConfiguration represents a declarative configuration of the BuildOverrides type for use +// with apply. +type BuildOverridesApplyConfiguration struct { + ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + ForcePull *bool `json:"forcePull,omitempty"` +} + +// BuildOverridesApplyConfiguration constructs a declarative configuration of the BuildOverrides type for use with +// apply. +func BuildOverrides() *BuildOverridesApplyConfiguration { + return &BuildOverridesApplyConfiguration{} +} + +// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImageLabels field. +func (b *BuildOverridesApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildOverridesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithImageLabels") + } + b.ImageLabels = append(b.ImageLabels, *values[i]) + } + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *BuildOverridesApplyConfiguration) WithNodeSelector(entries map[string]string) *BuildOverridesApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. 
+func (b *BuildOverridesApplyConfiguration) WithTolerations(values ...corev1.Toleration) *BuildOverridesApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithForcePull sets the ForcePull field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForcePull field is set to the value of the last call. +func (b *BuildOverridesApplyConfiguration) WithForcePull(value bool) *BuildOverridesApplyConfiguration { + b.ForcePull = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go new file mode 100644 index 0000000000000..1b8cb7054e271 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildSpecApplyConfiguration represents a declarative configuration of the BuildSpec type for use +// with apply. +type BuildSpecApplyConfiguration struct { + AdditionalTrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"additionalTrustedCA,omitempty"` + BuildDefaults *BuildDefaultsApplyConfiguration `json:"buildDefaults,omitempty"` + BuildOverrides *BuildOverridesApplyConfiguration `json:"buildOverrides,omitempty"` +} + +// BuildSpecApplyConfiguration constructs a declarative configuration of the BuildSpec type for use with +// apply. +func BuildSpec() *BuildSpecApplyConfiguration { + return &BuildSpecApplyConfiguration{} +} + +// WithAdditionalTrustedCA sets the AdditionalTrustedCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalTrustedCA field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithAdditionalTrustedCA(value *ConfigMapNameReferenceApplyConfiguration) *BuildSpecApplyConfiguration { + b.AdditionalTrustedCA = value + return b +} + +// WithBuildDefaults sets the BuildDefaults field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BuildDefaults field is set to the value of the last call. +func (b *BuildSpecApplyConfiguration) WithBuildDefaults(value *BuildDefaultsApplyConfiguration) *BuildSpecApplyConfiguration { + b.BuildDefaults = value + return b +} + +// WithBuildOverrides sets the BuildOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BuildOverrides field is set to the value of the last call. 
+func (b *BuildSpecApplyConfiguration) WithBuildOverrides(value *BuildOverridesApplyConfiguration) *BuildSpecApplyConfiguration { + b.BuildOverrides = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go new file mode 100644 index 0000000000000..79850b75e138b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// CloudControllerManagerStatusApplyConfiguration represents a declarative configuration of the CloudControllerManagerStatus type for use +// with apply. +type CloudControllerManagerStatusApplyConfiguration struct { + State *configv1.CloudControllerManagerState `json:"state,omitempty"` +} + +// CloudControllerManagerStatusApplyConfiguration constructs a declarative configuration of the CloudControllerManagerStatus type for use with +// apply. +func CloudControllerManagerStatus() *CloudControllerManagerStatusApplyConfiguration { + return &CloudControllerManagerStatusApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *CloudControllerManagerStatusApplyConfiguration) WithState(value configv1.CloudControllerManagerState) *CloudControllerManagerStatusApplyConfiguration { + b.State = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go new file mode 100644 index 0000000000000..d73faf3f20a0a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// CloudLoadBalancerConfigApplyConfiguration represents a declarative configuration of the CloudLoadBalancerConfig type for use +// with apply. +type CloudLoadBalancerConfigApplyConfiguration struct { + DNSType *configv1.DNSType `json:"dnsType,omitempty"` + ClusterHosted *CloudLoadBalancerIPsApplyConfiguration `json:"clusterHosted,omitempty"` +} + +// CloudLoadBalancerConfigApplyConfiguration constructs a declarative configuration of the CloudLoadBalancerConfig type for use with +// apply. +func CloudLoadBalancerConfig() *CloudLoadBalancerConfigApplyConfiguration { + return &CloudLoadBalancerConfigApplyConfiguration{} +} + +// WithDNSType sets the DNSType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSType field is set to the value of the last call. 
+func (b *CloudLoadBalancerConfigApplyConfiguration) WithDNSType(value configv1.DNSType) *CloudLoadBalancerConfigApplyConfiguration { + b.DNSType = &value + return b +} + +// WithClusterHosted sets the ClusterHosted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterHosted field is set to the value of the last call. +func (b *CloudLoadBalancerConfigApplyConfiguration) WithClusterHosted(value *CloudLoadBalancerIPsApplyConfiguration) *CloudLoadBalancerConfigApplyConfiguration { + b.ClusterHosted = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go new file mode 100644 index 0000000000000..ce7f258509a02 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// CloudLoadBalancerIPsApplyConfiguration represents a declarative configuration of the CloudLoadBalancerIPs type for use +// with apply. +type CloudLoadBalancerIPsApplyConfiguration struct { + APIIntLoadBalancerIPs []configv1.IP `json:"apiIntLoadBalancerIPs,omitempty"` + APILoadBalancerIPs []configv1.IP `json:"apiLoadBalancerIPs,omitempty"` + IngressLoadBalancerIPs []configv1.IP `json:"ingressLoadBalancerIPs,omitempty"` +} + +// CloudLoadBalancerIPsApplyConfiguration constructs a declarative configuration of the CloudLoadBalancerIPs type for use with +// apply. +func CloudLoadBalancerIPs() *CloudLoadBalancerIPsApplyConfiguration { + return &CloudLoadBalancerIPsApplyConfiguration{} +} + +// WithAPIIntLoadBalancerIPs adds the given value to the APIIntLoadBalancerIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIIntLoadBalancerIPs field. +func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPIIntLoadBalancerIPs(values ...configv1.IP) *CloudLoadBalancerIPsApplyConfiguration { + for i := range values { + b.APIIntLoadBalancerIPs = append(b.APIIntLoadBalancerIPs, values[i]) + } + return b +} + +// WithAPILoadBalancerIPs adds the given value to the APILoadBalancerIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APILoadBalancerIPs field. +func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPILoadBalancerIPs(values ...configv1.IP) *CloudLoadBalancerIPsApplyConfiguration { + for i := range values { + b.APILoadBalancerIPs = append(b.APILoadBalancerIPs, values[i]) + } + return b +} + +// WithIngressLoadBalancerIPs adds the given value to the IngressLoadBalancerIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IngressLoadBalancerIPs field. 
+func (b *CloudLoadBalancerIPsApplyConfiguration) WithIngressLoadBalancerIPs(values ...configv1.IP) *CloudLoadBalancerIPsApplyConfiguration { + for i := range values { + b.IngressLoadBalancerIPs = append(b.IngressLoadBalancerIPs, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go new file mode 100644 index 0000000000000..d71c182cf1dc1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterConditionApplyConfiguration represents a declarative configuration of the ClusterCondition type for use +// with apply. +type ClusterConditionApplyConfiguration struct { + Type *string `json:"type,omitempty"` + PromQL *PromQLClusterConditionApplyConfiguration `json:"promql,omitempty"` +} + +// ClusterConditionApplyConfiguration constructs a declarative configuration of the ClusterCondition type for use with +// apply. +func ClusterCondition() *ClusterConditionApplyConfiguration { + return &ClusterConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *ClusterConditionApplyConfiguration) WithType(value string) *ClusterConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithPromQL sets the PromQL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PromQL field is set to the value of the last call. +func (b *ClusterConditionApplyConfiguration) WithPromQL(value *PromQLClusterConditionApplyConfiguration) *ClusterConditionApplyConfiguration { + b.PromQL = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go new file mode 100644 index 0000000000000..ac180f893d332 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterNetworkEntryApplyConfiguration represents a declarative configuration of the ClusterNetworkEntry type for use +// with apply. +type ClusterNetworkEntryApplyConfiguration struct { + CIDR *string `json:"cidr,omitempty"` + HostPrefix *uint32 `json:"hostPrefix,omitempty"` +} + +// ClusterNetworkEntryApplyConfiguration constructs a declarative configuration of the ClusterNetworkEntry type for use with +// apply. +func ClusterNetworkEntry() *ClusterNetworkEntryApplyConfiguration { + return &ClusterNetworkEntryApplyConfiguration{} +} + +// WithCIDR sets the CIDR field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CIDR field is set to the value of the last call. 
+func (b *ClusterNetworkEntryApplyConfiguration) WithCIDR(value string) *ClusterNetworkEntryApplyConfiguration {
+	b.CIDR = &value
+	return b
+}
+
+// WithHostPrefix sets the HostPrefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostPrefix field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithHostPrefix(value uint32) *ClusterNetworkEntryApplyConfiguration {
+	b.HostPrefix = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
new file mode 100644
index 0000000000000..4bfa43805cd5e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterOperatorApplyConfiguration represents a declarative configuration of the ClusterOperator type for use
+// with apply.
+type ClusterOperatorApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *configv1.ClusterOperatorSpec            `json:"spec,omitempty"`
+	Status                               *ClusterOperatorStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ClusterOperator constructs a declarative configuration of the ClusterOperator type for use with
+// apply.
+func ClusterOperator(name string) *ClusterOperatorApplyConfiguration {
+	b := &ClusterOperatorApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ClusterOperator")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractClusterOperator extracts the applied configuration owned by fieldManager from
+// clusterOperator. If no managedFields are found in clusterOperator for fieldManager, a
+// ClusterOperatorApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterOperator must be an unmodified ClusterOperator API object that was retrieved from the Kubernetes API.
+// ExtractClusterOperator provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterOperator(clusterOperator *configv1.ClusterOperator, fieldManager string) (*ClusterOperatorApplyConfiguration, error) {
+	return extractClusterOperator(clusterOperator, fieldManager, "")
+}
+
+// ExtractClusterOperatorStatus is the same as ExtractClusterOperator except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterOperatorStatus(clusterOperator *configv1.ClusterOperator, fieldManager string) (*ClusterOperatorApplyConfiguration, error) { + return extractClusterOperator(clusterOperator, fieldManager, "status") +} + +func extractClusterOperator(clusterOperator *configv1.ClusterOperator, fieldManager string, subresource string) (*ClusterOperatorApplyConfiguration, error) { + b := &ClusterOperatorApplyConfiguration{} + err := managedfields.ExtractInto(clusterOperator, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterOperator"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterOperator.Name) + + b.WithKind("ClusterOperator") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithKind(value string) *ClusterOperatorApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithAPIVersion(value string) *ClusterOperatorApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithName(value string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithGenerateName(value string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithNamespace(value string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ClusterOperatorApplyConfiguration) WithUID(value types.UID) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithResourceVersion(value string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithGeneration(value int64) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ClusterOperatorApplyConfiguration) WithLabels(entries map[string]string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterOperatorApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterOperatorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterOperatorApplyConfiguration) WithFinalizers(values ...string) *ClusterOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterOperatorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterOperatorApplyConfiguration) WithSpec(value configv1.ClusterOperatorSpec) *ClusterOperatorApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ClusterOperatorApplyConfiguration) WithStatus(value *ClusterOperatorStatusApplyConfiguration) *ClusterOperatorApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterOperatorApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go new file mode 100644 index 0000000000000..d5a1989655c5b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go @@ -0,0 +1,69 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ClusterOperatorStatusApplyConfiguration represents a declarative configuration of the ClusterOperatorStatus type for use +// with apply. +type ClusterOperatorStatusApplyConfiguration struct { + Conditions []ClusterOperatorStatusConditionApplyConfiguration `json:"conditions,omitempty"` + Versions []OperandVersionApplyConfiguration `json:"versions,omitempty"` + RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"` + Extension *runtime.RawExtension `json:"extension,omitempty"` +} + +// ClusterOperatorStatusApplyConfiguration constructs a declarative configuration of the ClusterOperatorStatus type for use with +// apply. +func ClusterOperatorStatus() *ClusterOperatorStatusApplyConfiguration { + return &ClusterOperatorStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ClusterOperatorStatusApplyConfiguration) WithConditions(values ...*ClusterOperatorStatusConditionApplyConfiguration) *ClusterOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithVersions adds the given value to the Versions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Versions field. +func (b *ClusterOperatorStatusApplyConfiguration) WithVersions(values ...*OperandVersionApplyConfiguration) *ClusterOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVersions") + } + b.Versions = append(b.Versions, *values[i]) + } + return b +} + +// WithRelatedObjects adds the given value to the RelatedObjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelatedObjects field. 
+func (b *ClusterOperatorStatusApplyConfiguration) WithRelatedObjects(values ...*ObjectReferenceApplyConfiguration) *ClusterOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelatedObjects") + } + b.RelatedObjects = append(b.RelatedObjects, *values[i]) + } + return b +} + +// WithExtension sets the Extension field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Extension field is set to the value of the last call. +func (b *ClusterOperatorStatusApplyConfiguration) WithExtension(value runtime.RawExtension) *ClusterOperatorStatusApplyConfiguration { + b.Extension = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go new file mode 100644 index 0000000000000..3e58daa811581 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go @@ -0,0 +1,64 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterOperatorStatusConditionApplyConfiguration represents a declarative configuration of the ClusterOperatorStatusCondition type for use +// with apply. +type ClusterOperatorStatusConditionApplyConfiguration struct { + Type *configv1.ClusterStatusConditionType `json:"type,omitempty"` + Status *configv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// ClusterOperatorStatusConditionApplyConfiguration constructs a declarative configuration of the ClusterOperatorStatusCondition type for use with +// apply. +func ClusterOperatorStatusCondition() *ClusterOperatorStatusConditionApplyConfiguration { + return &ClusterOperatorStatusConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *ClusterOperatorStatusConditionApplyConfiguration) WithType(value configv1.ClusterStatusConditionType) *ClusterOperatorStatusConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterOperatorStatusConditionApplyConfiguration) WithStatus(value configv1.ConditionStatus) *ClusterOperatorStatusConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ClusterOperatorStatusConditionApplyConfiguration {
+	b.LastTransitionTime = &value
+	return b
+}
+
+// WithReason sets the Reason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Reason field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithReason(value string) *ClusterOperatorStatusConditionApplyConfiguration {
+	b.Reason = &value
+	return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithMessage(value string) *ClusterOperatorStatusConditionApplyConfiguration {
+	b.Message = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
new file mode 100644
index 0000000000000..69073ee5c99ca
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterVersionApplyConfiguration represents a declarative configuration of the ClusterVersion type for use
+// with apply.
+type ClusterVersionApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ClusterVersionSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *ClusterVersionStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ClusterVersion constructs a declarative configuration of the ClusterVersion type for use with
+// apply.
+func ClusterVersion(name string) *ClusterVersionApplyConfiguration {
+	b := &ClusterVersionApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ClusterVersion")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractClusterVersion extracts the applied configuration owned by fieldManager from
+// clusterVersion. If no managedFields are found in clusterVersion for fieldManager, a
+// ClusterVersionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterVersion must be an unmodified ClusterVersion API object that was retrieved from the Kubernetes API.
+// ExtractClusterVersion provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterVersion(clusterVersion *configv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) { + return extractClusterVersion(clusterVersion, fieldManager, "") +} + +// ExtractClusterVersionStatus is the same as ExtractClusterVersion except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterVersionStatus(clusterVersion *configv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) { + return extractClusterVersion(clusterVersion, fieldManager, "status") +} + +func extractClusterVersion(clusterVersion *configv1.ClusterVersion, fieldManager string, subresource string) (*ClusterVersionApplyConfiguration, error) { + b := &ClusterVersionApplyConfiguration{} + err := managedfields.ExtractInto(clusterVersion, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterVersion"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterVersion.Name) + + b.WithKind("ClusterVersion") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithKind(value string) *ClusterVersionApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithAPIVersion(value string) *ClusterVersionApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithName(value string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithGenerateName(value string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ClusterVersionApplyConfiguration) WithNamespace(value string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithUID(value types.UID) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithResourceVersion(value string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithGeneration(value int64) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ClusterVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ClusterVersionApplyConfiguration) WithLabels(entries map[string]string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterVersionApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterVersionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ClusterVersionApplyConfiguration) WithFinalizers(values ...string) *ClusterVersionApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithSpec(value *ClusterVersionSpecApplyConfiguration) *ClusterVersionApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterVersionApplyConfiguration) WithStatus(value *ClusterVersionStatusApplyConfiguration) *ClusterVersionApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterVersionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go new file mode 100644 index 0000000000000..feb03e3c36be6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ClusterVersionCapabilitiesSpecApplyConfiguration represents a declarative configuration of the ClusterVersionCapabilitiesSpec type for use +// with apply. +type ClusterVersionCapabilitiesSpecApplyConfiguration struct { + BaselineCapabilitySet *configv1.ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"` + AdditionalEnabledCapabilities []configv1.ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"` +} + +// ClusterVersionCapabilitiesSpecApplyConfiguration constructs a declarative configuration of the ClusterVersionCapabilitiesSpec type for use with +// apply. +func ClusterVersionCapabilitiesSpec() *ClusterVersionCapabilitiesSpecApplyConfiguration { + return &ClusterVersionCapabilitiesSpecApplyConfiguration{} +} + +// WithBaselineCapabilitySet sets the BaselineCapabilitySet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BaselineCapabilitySet field is set to the value of the last call. 
+func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithBaselineCapabilitySet(value configv1.ClusterVersionCapabilitySet) *ClusterVersionCapabilitiesSpecApplyConfiguration {
+	b.BaselineCapabilitySet = &value
+	return b
+}
+
+// WithAdditionalEnabledCapabilities adds the given value to the AdditionalEnabledCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AdditionalEnabledCapabilities field.
+func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithAdditionalEnabledCapabilities(values ...configv1.ClusterVersionCapability) *ClusterVersionCapabilitiesSpecApplyConfiguration {
+	for i := range values {
+		b.AdditionalEnabledCapabilities = append(b.AdditionalEnabledCapabilities, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
new file mode 100644
index 0000000000000..2a8807fe2a081
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
@@ -0,0 +1,40 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// ClusterVersionCapabilitiesStatusApplyConfiguration represents a declarative configuration of the ClusterVersionCapabilitiesStatus type for use
+// with apply.
+type ClusterVersionCapabilitiesStatusApplyConfiguration struct {
+	EnabledCapabilities []configv1.ClusterVersionCapability `json:"enabledCapabilities,omitempty"`
+	KnownCapabilities   []configv1.ClusterVersionCapability `json:"knownCapabilities,omitempty"`
+}
+
+// ClusterVersionCapabilitiesStatusApplyConfiguration constructs a declarative configuration of the ClusterVersionCapabilitiesStatus type for use with
+// apply.
+func ClusterVersionCapabilitiesStatus() *ClusterVersionCapabilitiesStatusApplyConfiguration {
+	return &ClusterVersionCapabilitiesStatusApplyConfiguration{}
+}
+
+// WithEnabledCapabilities adds the given value to the EnabledCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the EnabledCapabilities field.
+func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithEnabledCapabilities(values ...configv1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration {
+	for i := range values {
+		b.EnabledCapabilities = append(b.EnabledCapabilities, values[i])
+	}
+	return b
+}
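
A minimal usage sketch (editorial, not part of the vendored patch): the generated builders are assembled by chaining "With" calls, with slice-valued setters appending on repeated calls. The package alias configv1ac is an assumption for illustration; the capability constants come from github.com/openshift/api/config/v1.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Chain "With" calls to build a capabilities spec; repeated calls to the
	// slice-valued setter append rather than replace.
	caps := configv1ac.ClusterVersionCapabilitiesSpec().
		WithBaselineCapabilitySet(configv1.ClusterVersionCapabilitySetNone).
		WithAdditionalEnabledCapabilities(configv1.ClusterVersionCapabilityBuild)
	fmt.Println(*caps.BaselineCapabilitySet, caps.AdditionalEnabledCapabilities)
}

+// WithKnownCapabilities adds the given value to the KnownCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the KnownCapabilities field.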
+func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithKnownCapabilities(values ...configv1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration { + for i := range values { + b.KnownCapabilities = append(b.KnownCapabilities, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go new file mode 100644 index 0000000000000..926f2955720f9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go @@ -0,0 +1,91 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ClusterVersionSpecApplyConfiguration represents a declarative configuration of the ClusterVersionSpec type for use +// with apply. +type ClusterVersionSpecApplyConfiguration struct { + ClusterID *configv1.ClusterID `json:"clusterID,omitempty"` + DesiredUpdate *UpdateApplyConfiguration `json:"desiredUpdate,omitempty"` + Upstream *configv1.URL `json:"upstream,omitempty"` + Channel *string `json:"channel,omitempty"` + Capabilities *ClusterVersionCapabilitiesSpecApplyConfiguration `json:"capabilities,omitempty"` + SignatureStores []SignatureStoreApplyConfiguration `json:"signatureStores,omitempty"` + Overrides []ComponentOverrideApplyConfiguration `json:"overrides,omitempty"` +} + +// ClusterVersionSpecApplyConfiguration constructs a declarative configuration of the ClusterVersionSpec type for use with +// apply. +func ClusterVersionSpec() *ClusterVersionSpecApplyConfiguration { + return &ClusterVersionSpecApplyConfiguration{} +} + +// WithClusterID sets the ClusterID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterID field is set to the value of the last call. +func (b *ClusterVersionSpecApplyConfiguration) WithClusterID(value configv1.ClusterID) *ClusterVersionSpecApplyConfiguration { + b.ClusterID = &value + return b +} + +// WithDesiredUpdate sets the DesiredUpdate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DesiredUpdate field is set to the value of the last call. +func (b *ClusterVersionSpecApplyConfiguration) WithDesiredUpdate(value *UpdateApplyConfiguration) *ClusterVersionSpecApplyConfiguration { + b.DesiredUpdate = value + return b +} + +// WithUpstream sets the Upstream field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Upstream field is set to the value of the last call. +func (b *ClusterVersionSpecApplyConfiguration) WithUpstream(value configv1.URL) *ClusterVersionSpecApplyConfiguration { + b.Upstream = &value + return b +} + +// WithChannel sets the Channel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Channel field is set to the value of the last call. 
+func (b *ClusterVersionSpecApplyConfiguration) WithChannel(value string) *ClusterVersionSpecApplyConfiguration {
+	b.Channel = &value
+	return b
+}
+
+// WithCapabilities sets the Capabilities field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Capabilities field is set to the value of the last call.
+func (b *ClusterVersionSpecApplyConfiguration) WithCapabilities(value *ClusterVersionCapabilitiesSpecApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+	b.Capabilities = value
+	return b
+}
+
+// WithSignatureStores adds the given value to the SignatureStores field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the SignatureStores field.
+func (b *ClusterVersionSpecApplyConfiguration) WithSignatureStores(values ...*SignatureStoreApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithSignatureStores")
+		}
+		b.SignatureStores = append(b.SignatureStores, *values[i])
+	}
+	return b
+}
+
+// WithOverrides adds the given value to the Overrides field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Overrides field.
+func (b *ClusterVersionSpecApplyConfiguration) WithOverrides(values ...*ComponentOverrideApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOverrides")
+		}
+		b.Overrides = append(b.Overrides, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
new file mode 100644
index 0000000000000..e966cf424211c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
@@ -0,0 +1,106 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ClusterVersionStatusApplyConfiguration represents a declarative configuration of the ClusterVersionStatus type for use
+// with apply.
+type ClusterVersionStatusApplyConfiguration struct {
+	Desired            *ReleaseApplyConfiguration                          `json:"desired,omitempty"`
+	History            []UpdateHistoryApplyConfiguration                   `json:"history,omitempty"`
+	ObservedGeneration *int64                                              `json:"observedGeneration,omitempty"`
+	VersionHash        *string                                             `json:"versionHash,omitempty"`
+	Capabilities       *ClusterVersionCapabilitiesStatusApplyConfiguration `json:"capabilities,omitempty"`
+	Conditions         []ClusterOperatorStatusConditionApplyConfiguration  `json:"conditions,omitempty"`
+	AvailableUpdates   []ReleaseApplyConfiguration                         `json:"availableUpdates,omitempty"`
+	ConditionalUpdates []ConditionalUpdateApplyConfiguration               `json:"conditionalUpdates,omitempty"`
+}
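
A sketch of composing a spec from these builders (editorial, not part of the vendored patch): scalar setters overwrite on repeated calls, while pointer-valued variadic setters such as WithOverrides append and panic on nil elements. The alias configv1ac and the channel name "stable-4.y" are illustrative placeholders.

package main

import (
	"fmt"

	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Nest builders: the override is itself built by chaining, then passed
	// by pointer into the variadic setter, which copies its value.
	spec := configv1ac.ClusterVersionSpec().
		WithChannel("stable-4.y").
		WithOverrides(configv1ac.ComponentOverride().
			WithKind("Deployment").
			WithGroup("apps").
			WithNamespace("openshift-console").
			WithName("console").
			WithUnmanaged(true))
	fmt.Println(*spec.Channel, len(spec.Overrides))
}

+// ClusterVersionStatusApplyConfiguration constructs a declarative configuration of the ClusterVersionStatus type for use with
+// apply.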
+func ClusterVersionStatus() *ClusterVersionStatusApplyConfiguration {
+	return &ClusterVersionStatusApplyConfiguration{}
+}
+
+// WithDesired sets the Desired field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Desired field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithDesired(value *ReleaseApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+	b.Desired = value
+	return b
+}
+
+// WithHistory adds the given value to the History field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the History field.
+func (b *ClusterVersionStatusApplyConfiguration) WithHistory(values ...*UpdateHistoryApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithHistory")
+		}
+		b.History = append(b.History, *values[i])
+	}
+	return b
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithObservedGeneration(value int64) *ClusterVersionStatusApplyConfiguration {
+	b.ObservedGeneration = &value
+	return b
+}
+
+// WithVersionHash sets the VersionHash field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the VersionHash field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithVersionHash(value string) *ClusterVersionStatusApplyConfiguration {
+	b.VersionHash = &value
+	return b
+}
+
+// WithCapabilities sets the Capabilities field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Capabilities field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithCapabilities(value *ClusterVersionCapabilitiesStatusApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+	b.Capabilities = value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ClusterVersionStatusApplyConfiguration) WithConditions(values ...*ClusterOperatorStatusConditionApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithAvailableUpdates adds the given value to the AvailableUpdates field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AvailableUpdates field.
+func (b *ClusterVersionStatusApplyConfiguration) WithAvailableUpdates(values ...*ReleaseApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithAvailableUpdates")
+		}
+		b.AvailableUpdates = append(b.AvailableUpdates, *values[i])
+	}
+	return b
+}
+
+// WithConditionalUpdates adds the given value to the ConditionalUpdates field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ConditionalUpdates field.
+func (b *ClusterVersionStatusApplyConfiguration) WithConditionalUpdates(values ...*ConditionalUpdateApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditionalUpdates")
+		}
+		b.ConditionalUpdates = append(b.ConditionalUpdates, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
new file mode 100644
index 0000000000000..e87332d896590
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
@@ -0,0 +1,59 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ComponentOverrideApplyConfiguration represents a declarative configuration of the ComponentOverride type for use
+// with apply.
+type ComponentOverrideApplyConfiguration struct {
+	Kind      *string `json:"kind,omitempty"`
+	Group     *string `json:"group,omitempty"`
+	Namespace *string `json:"namespace,omitempty"`
+	Name      *string `json:"name,omitempty"`
+	Unmanaged *bool   `json:"unmanaged,omitempty"`
+}
+
+// ComponentOverrideApplyConfiguration constructs a declarative configuration of the ComponentOverride type for use with
+// apply.
+func ComponentOverride() *ComponentOverrideApplyConfiguration {
+	return &ComponentOverrideApplyConfiguration{}
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithKind(value string) *ComponentOverrideApplyConfiguration {
+	b.Kind = &value
+	return b
+}
+
+// WithGroup sets the Group field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Group field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithGroup(value string) *ComponentOverrideApplyConfiguration {
+	b.Group = &value
+	return b
+}
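
To make the documented nil-guard concrete, here is a small sketch (editorial, not part of the vendored patch; the alias configv1ac is assumed): the pointer-valued variadic setters reject a nil element up front with a panic instead of silently storing a zero value, so the mistake surfaces where the configuration is built.

package main

import (
	"fmt"

	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	defer func() {
		// Prints: recovered: nil value passed to WithAvailableUpdates
		fmt.Println("recovered:", recover())
	}()
	configv1ac.ClusterVersionStatus().WithAvailableUpdates(nil)
}

+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.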
+func (b *ComponentOverrideApplyConfiguration) WithNamespace(value string) *ComponentOverrideApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ComponentOverrideApplyConfiguration) WithName(value string) *ComponentOverrideApplyConfiguration { + b.Name = &value + return b +} + +// WithUnmanaged sets the Unmanaged field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Unmanaged field is set to the value of the last call. +func (b *ComponentOverrideApplyConfiguration) WithUnmanaged(value bool) *ComponentOverrideApplyConfiguration { + b.Unmanaged = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go new file mode 100644 index 0000000000000..beebd2b0246dc --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ComponentRouteSpecApplyConfiguration represents a declarative configuration of the ComponentRouteSpec type for use +// with apply. +type ComponentRouteSpecApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + Hostname *configv1.Hostname `json:"hostname,omitempty"` + ServingCertKeyPairSecret *SecretNameReferenceApplyConfiguration `json:"servingCertKeyPairSecret,omitempty"` +} + +// ComponentRouteSpecApplyConfiguration constructs a declarative configuration of the ComponentRouteSpec type for use with +// apply. +func ComponentRouteSpec() *ComponentRouteSpecApplyConfiguration { + return &ComponentRouteSpecApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ComponentRouteSpecApplyConfiguration) WithNamespace(value string) *ComponentRouteSpecApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ComponentRouteSpecApplyConfiguration) WithName(value string) *ComponentRouteSpecApplyConfiguration { + b.Name = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. 
+func (b *ComponentRouteSpecApplyConfiguration) WithHostname(value configv1.Hostname) *ComponentRouteSpecApplyConfiguration { + b.Hostname = &value + return b +} + +// WithServingCertKeyPairSecret sets the ServingCertKeyPairSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServingCertKeyPairSecret field is set to the value of the last call. +func (b *ComponentRouteSpecApplyConfiguration) WithServingCertKeyPairSecret(value *SecretNameReferenceApplyConfiguration) *ComponentRouteSpecApplyConfiguration { + b.ServingCertKeyPairSecret = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go new file mode 100644 index 0000000000000..ae955388217d0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go @@ -0,0 +1,96 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ComponentRouteStatusApplyConfiguration represents a declarative configuration of the ComponentRouteStatus type for use +// with apply. +type ComponentRouteStatusApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + DefaultHostname *configv1.Hostname `json:"defaultHostname,omitempty"` + ConsumingUsers []configv1.ConsumingUser `json:"consumingUsers,omitempty"` + CurrentHostnames []configv1.Hostname `json:"currentHostnames,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"` +} + +// ComponentRouteStatusApplyConfiguration constructs a declarative configuration of the ComponentRouteStatus type for use with +// apply. +func ComponentRouteStatus() *ComponentRouteStatusApplyConfiguration { + return &ComponentRouteStatusApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ComponentRouteStatusApplyConfiguration) WithNamespace(value string) *ComponentRouteStatusApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ComponentRouteStatusApplyConfiguration) WithName(value string) *ComponentRouteStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithDefaultHostname sets the DefaultHostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultHostname field is set to the value of the last call. 
+func (b *ComponentRouteStatusApplyConfiguration) WithDefaultHostname(value configv1.Hostname) *ComponentRouteStatusApplyConfiguration {
+	b.DefaultHostname = &value
+	return b
+}
+
+// WithConsumingUsers adds the given value to the ConsumingUsers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ConsumingUsers field.
+func (b *ComponentRouteStatusApplyConfiguration) WithConsumingUsers(values ...configv1.ConsumingUser) *ComponentRouteStatusApplyConfiguration {
+	for i := range values {
+		b.ConsumingUsers = append(b.ConsumingUsers, values[i])
+	}
+	return b
+}
+
+// WithCurrentHostnames adds the given value to the CurrentHostnames field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the CurrentHostnames field.
+func (b *ComponentRouteStatusApplyConfiguration) WithCurrentHostnames(values ...configv1.Hostname) *ComponentRouteStatusApplyConfiguration {
+	for i := range values {
+		b.CurrentHostnames = append(b.CurrentHostnames, values[i])
+	}
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ComponentRouteStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ComponentRouteStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithRelatedObjects adds the given value to the RelatedObjects field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RelatedObjects field.
+func (b *ComponentRouteStatusApplyConfiguration) WithRelatedObjects(values ...*ObjectReferenceApplyConfiguration) *ComponentRouteStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithRelatedObjects")
+		}
+		b.RelatedObjects = append(b.RelatedObjects, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
new file mode 100644
index 0000000000000..f183fc6e252ba
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
@@ -0,0 +1,55 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ConditionalUpdateApplyConfiguration represents a declarative configuration of the ConditionalUpdate type for use
+// with apply.
+type ConditionalUpdateApplyConfiguration struct {
+	Release    *ReleaseApplyConfiguration                `json:"release,omitempty"`
+	Risks      []ConditionalUpdateRiskApplyConfiguration `json:"risks,omitempty"`
+	Conditions []metav1.ConditionApplyConfiguration      `json:"conditions,omitempty"`
+}
+
+// ConditionalUpdateApplyConfiguration constructs a declarative configuration of the ConditionalUpdate type for use with
+// apply.
+func ConditionalUpdate() *ConditionalUpdateApplyConfiguration {
+	return &ConditionalUpdateApplyConfiguration{}
+}
+
+// WithRelease sets the Release field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Release field is set to the value of the last call.
+func (b *ConditionalUpdateApplyConfiguration) WithRelease(value *ReleaseApplyConfiguration) *ConditionalUpdateApplyConfiguration {
+	b.Release = value
+	return b
+}
+
+// WithRisks adds the given value to the Risks field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Risks field.
+func (b *ConditionalUpdateApplyConfiguration) WithRisks(values ...*ConditionalUpdateRiskApplyConfiguration) *ConditionalUpdateApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithRisks")
+		}
+		b.Risks = append(b.Risks, *values[i])
+	}
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ConditionalUpdateApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ConditionalUpdateApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
new file mode 100644
index 0000000000000..6debb6e624574
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
@@ -0,0 +1,55 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConditionalUpdateRiskApplyConfiguration represents a declarative configuration of the ConditionalUpdateRisk type for use
+// with apply.
+type ConditionalUpdateRiskApplyConfiguration struct {
+	URL           *string                              `json:"url,omitempty"`
+	Name          *string                              `json:"name,omitempty"`
+	Message       *string                              `json:"message,omitempty"`
+	MatchingRules []ClusterConditionApplyConfiguration `json:"matchingRules,omitempty"`
+}
+
+// ConditionalUpdateRiskApplyConfiguration constructs a declarative configuration of the ConditionalUpdateRisk type for use with
+// apply.
+func ConditionalUpdateRisk() *ConditionalUpdateRiskApplyConfiguration {
+	return &ConditionalUpdateRiskApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithURL(value string) *ConditionalUpdateRiskApplyConfiguration {
+	b.URL = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithName(value string) *ConditionalUpdateRiskApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithMessage(value string) *ConditionalUpdateRiskApplyConfiguration {
+	b.Message = &value
+	return b
+}
+
+// WithMatchingRules adds the given value to the MatchingRules field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchingRules field.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithMatchingRules(values ...*ClusterConditionApplyConfiguration) *ConditionalUpdateRiskApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithMatchingRules")
+		}
+		b.MatchingRules = append(b.MatchingRules, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
new file mode 100644
index 0000000000000..3c70be2c1e8a1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConfigMapFileReferenceApplyConfiguration represents a declarative configuration of the ConfigMapFileReference type for use
+// with apply.
+type ConfigMapFileReferenceApplyConfiguration struct {
+	Name *string `json:"name,omitempty"`
+	Key  *string `json:"key,omitempty"`
+}
+
+// ConfigMapFileReferenceApplyConfiguration constructs a declarative configuration of the ConfigMapFileReference type for use with
+// apply.
+func ConfigMapFileReference() *ConfigMapFileReferenceApplyConfiguration {
+	return &ConfigMapFileReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConfigMapFileReferenceApplyConfiguration) WithName(value string) *ConfigMapFileReferenceApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *ConfigMapFileReferenceApplyConfiguration) WithKey(value string) *ConfigMapFileReferenceApplyConfiguration { + b.Key = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go new file mode 100644 index 0000000000000..8236ba12331ef --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConfigMapNameReferenceApplyConfiguration represents a declarative configuration of the ConfigMapNameReference type for use +// with apply. +type ConfigMapNameReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// ConfigMapNameReferenceApplyConfiguration constructs a declarative configuration of the ConfigMapNameReference type for use with +// apply. +func ConfigMapNameReference() *ConfigMapNameReferenceApplyConfiguration { + return &ConfigMapNameReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ConfigMapNameReferenceApplyConfiguration) WithName(value string) *ConfigMapNameReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go new file mode 100644 index 0000000000000..8e04091da8d98 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ConsoleApplyConfiguration represents a declarative configuration of the Console type for use +// with apply. +type ConsoleApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"` +} + +// Console constructs a declarative configuration of the Console type for use with +// apply. +func Console(name string) *ConsoleApplyConfiguration { + b := &ConsoleApplyConfiguration{} + b.WithName(name) + b.WithKind("Console") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractConsole extracts the applied configuration owned by fieldManager from +// console. If no managedFields are found in console for fieldManager, a +// ConsoleApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for fieldManager because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields.
// console must be an unmodified Console API object that was retrieved from the Kubernetes API.
// ExtractConsole provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
+func ExtractConsole(console *configv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) {
+	return extractConsole(console, fieldManager, "")
+}
+
+// ExtractConsoleStatus is the same as ExtractConsole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractConsoleStatus(console *configv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) {
+	return extractConsole(console, fieldManager, "status")
+}
+
+func extractConsole(console *configv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) {
+	b := &ConsoleApplyConfiguration{}
+	err := managedfields.ExtractInto(console, internal.Parser().Type("com.github.openshift.api.config.v1.Console"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(console.Name)
+
+	b.WithKind("Console")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
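
A minimal sketch of the extract/modify-in-place/apply workflow described above (editorial, not part of the vendored patch). It assumes the generated clientset from github.com/openshift/client-go exposes an Apply method for this type, and the field manager name "my-operator", the logout URL, and the error handling are illustrative; "cluster" is the conventional name of the singleton Console config object.

package main

import (
	"context"

	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func updateLogoutRedirect(ctx context.Context, client configclient.Interface) error {
	// Read the live object, extract only the fields this manager owns,
	// mutate them, and apply the result back under the same manager.
	console, err := client.ConfigV1().Consoles().Get(ctx, "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := configv1ac.ExtractConsole(console, "my-operator")
	if err != nil {
		return err
	}
	cfg.WithSpec(configv1ac.ConsoleSpec().
		WithAuthentication(configv1ac.ConsoleAuthentication().
			WithLogoutRedirect("https://login.example.com/logout")))
	_, err = client.ConfigV1().Consoles().Apply(ctx, cfg, metav1.ApplyOptions{
		FieldManager: "my-operator",
		Force:        true,
	})
	return err
}

+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.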
+func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithGeneration(value int64) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConsoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *ConsoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) *ConsoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
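
A short sketch of the map-merge semantics documented for WithLabels (editorial, not part of the vendored patch; the alias configv1ac is assumed): repeated calls merge entries into one map, and a later call only overwrites the keys it repeats.

package main

import (
	"fmt"

	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// The second WithLabels call merges into the same map; only "env" is
	// overwritten, "tier" survives from the first call.
	b := configv1ac.Console("cluster").
		WithLabels(map[string]string{"tier": "one", "env": "dev"}).
		WithLabels(map[string]string{"env": "prod"})
	fmt.Println(b.Labels) // map[env:prod tier:one]
}

+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.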
+func (b *ConsoleApplyConfiguration) WithFinalizers(values ...string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ConsoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithSpec(value *ConsoleSpecApplyConfiguration) *ConsoleApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfiguration) *ConsoleApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConsoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go new file mode 100644 index 0000000000000..cdc3aa73202b5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConsoleAuthenticationApplyConfiguration represents a declarative configuration of the ConsoleAuthentication type for use +// with apply. +type ConsoleAuthenticationApplyConfiguration struct { + LogoutRedirect *string `json:"logoutRedirect,omitempty"` +} + +// ConsoleAuthenticationApplyConfiguration constructs a declarative configuration of the ConsoleAuthentication type for use with +// apply. +func ConsoleAuthentication() *ConsoleAuthenticationApplyConfiguration { + return &ConsoleAuthenticationApplyConfiguration{} +} + +// WithLogoutRedirect sets the LogoutRedirect field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogoutRedirect field is set to the value of the last call. +func (b *ConsoleAuthenticationApplyConfiguration) WithLogoutRedirect(value string) *ConsoleAuthenticationApplyConfiguration { + b.LogoutRedirect = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go new file mode 100644 index 0000000000000..0ce163b2bb90c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// ConsoleSpecApplyConfiguration represents a declarative configuration of the ConsoleSpec type for use +// with apply. +type ConsoleSpecApplyConfiguration struct { + Authentication *ConsoleAuthenticationApplyConfiguration `json:"authentication,omitempty"` +} + +// ConsoleSpecApplyConfiguration constructs a declarative configuration of the ConsoleSpec type for use with +// apply. +func ConsoleSpec() *ConsoleSpecApplyConfiguration { + return &ConsoleSpecApplyConfiguration{} +} + +// WithAuthentication sets the Authentication field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authentication field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithAuthentication(value *ConsoleAuthenticationApplyConfiguration) *ConsoleSpecApplyConfiguration { + b.Authentication = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go new file mode 100644 index 0000000000000..f1336def37eb4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConsoleStatusApplyConfiguration represents a declarative configuration of the ConsoleStatus type for use +// with apply. +type ConsoleStatusApplyConfiguration struct { + ConsoleURL *string `json:"consoleURL,omitempty"` +} + +// ConsoleStatusApplyConfiguration constructs a declarative configuration of the ConsoleStatus type for use with +// apply. +func ConsoleStatus() *ConsoleStatusApplyConfiguration { + return &ConsoleStatusApplyConfiguration{} +} + +// WithConsoleURL sets the ConsoleURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConsoleURL field is set to the value of the last call. +func (b *ConsoleStatusApplyConfiguration) WithConsoleURL(value string) *ConsoleStatusApplyConfiguration { + b.ConsoleURL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go new file mode 100644 index 0000000000000..7cd70c7ee2712 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// CustomFeatureGatesApplyConfiguration represents a declarative configuration of the CustomFeatureGates type for use +// with apply. +type CustomFeatureGatesApplyConfiguration struct { + Enabled []configv1.FeatureGateName `json:"enabled,omitempty"` + Disabled []configv1.FeatureGateName `json:"disabled,omitempty"` +} + +// CustomFeatureGatesApplyConfiguration constructs a declarative configuration of the CustomFeatureGates type for use with +// apply. 
+func CustomFeatureGates() *CustomFeatureGatesApplyConfiguration {
+	return &CustomFeatureGatesApplyConfiguration{}
+}
+
+// WithEnabled adds the given value to the Enabled field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Enabled field.
+func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...configv1.FeatureGateName) *CustomFeatureGatesApplyConfiguration {
+	for i := range values {
+		b.Enabled = append(b.Enabled, values[i])
+	}
+	return b
+}
+
+// WithDisabled adds the given value to the Disabled field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Disabled field.
+func (b *CustomFeatureGatesApplyConfiguration) WithDisabled(values ...configv1.FeatureGateName) *CustomFeatureGatesApplyConfiguration {
+	for i := range values {
+		b.Disabled = append(b.Disabled, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
new file mode 100644
index 0000000000000..ae03671cd3f89
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// CustomTLSProfileApplyConfiguration represents a declarative configuration of the CustomTLSProfile type for use
+// with apply.
+type CustomTLSProfileApplyConfiguration struct {
+	TLSProfileSpecApplyConfiguration `json:",inline"`
+}
+
+// CustomTLSProfileApplyConfiguration constructs a declarative configuration of the CustomTLSProfile type for use with
+// apply.
+func CustomTLSProfile() *CustomTLSProfileApplyConfiguration {
+	return &CustomTLSProfileApplyConfiguration{}
+}
+
+// WithCiphers adds the given value to the Ciphers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Ciphers field.
+func (b *CustomTLSProfileApplyConfiguration) WithCiphers(values ...string) *CustomTLSProfileApplyConfiguration {
+	for i := range values {
+		b.TLSProfileSpecApplyConfiguration.Ciphers = append(b.TLSProfileSpecApplyConfiguration.Ciphers, values[i])
+	}
+	return b
+}
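
A sketch of the inlined-spec pattern (editorial, not part of the vendored patch): because TLSProfileSpecApplyConfiguration is embedded inline, its fields are set through the same chained builder. The alias configv1ac and the cipher names are illustrative; the VersionTLS12 constant comes from github.com/openshift/api/config/v1.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Fields of the embedded TLSProfileSpec are reached through the outer
	// builder; WithCiphers appends across calls, WithMinTLSVersion overwrites.
	profile := configv1ac.CustomTLSProfile().
		WithCiphers("ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256").
		WithMinTLSVersion(configv1.VersionTLS12)
	fmt.Println(profile.Ciphers, *profile.MinTLSVersion)
}

+// WithMinTLSVersion sets the MinTLSVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinTLSVersion field is set to the value of the last call.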
+func (b *CustomTLSProfileApplyConfiguration) WithMinTLSVersion(value configv1.TLSProtocolVersion) *CustomTLSProfileApplyConfiguration { + b.TLSProfileSpecApplyConfiguration.MinTLSVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go new file mode 100644 index 0000000000000..20742aec97472 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// DeprecatedWebhookTokenAuthenticatorApplyConfiguration represents a declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use +// with apply. +type DeprecatedWebhookTokenAuthenticatorApplyConfiguration struct { + KubeConfig *SecretNameReferenceApplyConfiguration `json:"kubeConfig,omitempty"` +} + +// DeprecatedWebhookTokenAuthenticatorApplyConfiguration constructs a declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use with +// apply. +func DeprecatedWebhookTokenAuthenticator() *DeprecatedWebhookTokenAuthenticatorApplyConfiguration { + return &DeprecatedWebhookTokenAuthenticatorApplyConfiguration{} +} + +// WithKubeConfig sets the KubeConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KubeConfig field is set to the value of the last call. +func (b *DeprecatedWebhookTokenAuthenticatorApplyConfiguration) WithKubeConfig(value *SecretNameReferenceApplyConfiguration) *DeprecatedWebhookTokenAuthenticatorApplyConfiguration { + b.KubeConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go new file mode 100644 index 0000000000000..4ca934c965c8d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DNSApplyConfiguration represents a declarative configuration of the DNS type for use +// with apply. +type DNSApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.DNSStatus `json:"status,omitempty"` +} + +// DNS constructs a declarative configuration of the DNS type for use with +// apply. +func DNS(name string) *DNSApplyConfiguration { + b := &DNSApplyConfiguration{} + b.WithName(name) + b.WithKind("DNS") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractDNS extracts the applied configuration owned by fieldManager from +// dNS. 
If no managedFields are found in dNS for fieldManager, a +// DNSApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// dNS must be an unmodified DNS API object that was retrieved from the Kubernetes API. +// ExtractDNS provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDNS(dNS *configv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { + return extractDNS(dNS, fieldManager, "") +} + +// ExtractDNSStatus is the same as ExtractDNS except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractDNSStatus(dNS *configv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { + return extractDNS(dNS, fieldManager, "status") +} + +func extractDNS(dNS *configv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) { + b := &DNSApplyConfiguration{} + err := managedfields.ExtractInto(dNS, internal.Parser().Type("com.github.openshift.api.config.v1.DNS"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(dNS.Name) + + b.WithKind("DNS") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithGeneration(value int64) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *DNSApplyConfiguration) WithFinalizers(values ...string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *DNSApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithSpec(value *DNSSpecApplyConfiguration) *DNSApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithStatus(value configv1.DNSStatus) *DNSApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DNSApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go new file mode 100644 index 0000000000000..46bf616b268f8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// DNSPlatformSpecApplyConfiguration represents a declarative configuration of the DNSPlatformSpec type for use +// with apply. +type DNSPlatformSpecApplyConfiguration struct { + Type *configv1.PlatformType `json:"type,omitempty"` + AWS *AWSDNSSpecApplyConfiguration `json:"aws,omitempty"` +} + +// DNSPlatformSpecApplyConfiguration constructs a declarative configuration of the DNSPlatformSpec type for use with +// apply. +func DNSPlatformSpec() *DNSPlatformSpecApplyConfiguration { + return &DNSPlatformSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *DNSPlatformSpecApplyConfiguration) WithType(value configv1.PlatformType) *DNSPlatformSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *DNSPlatformSpecApplyConfiguration) WithAWS(value *AWSDNSSpecApplyConfiguration) *DNSPlatformSpecApplyConfiguration { + b.AWS = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go new file mode 100644 index 0000000000000..fbc8b60e71787 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// DNSSpecApplyConfiguration represents a declarative configuration of the DNSSpec type for use +// with apply. 
+type DNSSpecApplyConfiguration struct { + BaseDomain *string `json:"baseDomain,omitempty"` + PublicZone *DNSZoneApplyConfiguration `json:"publicZone,omitempty"` + PrivateZone *DNSZoneApplyConfiguration `json:"privateZone,omitempty"` + Platform *DNSPlatformSpecApplyConfiguration `json:"platform,omitempty"` +} + +// DNSSpecApplyConfiguration constructs a declarative configuration of the DNSSpec type for use with +// apply. +func DNSSpec() *DNSSpecApplyConfiguration { + return &DNSSpecApplyConfiguration{} +} + +// WithBaseDomain sets the BaseDomain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BaseDomain field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithBaseDomain(value string) *DNSSpecApplyConfiguration { + b.BaseDomain = &value + return b +} + +// WithPublicZone sets the PublicZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PublicZone field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithPublicZone(value *DNSZoneApplyConfiguration) *DNSSpecApplyConfiguration { + b.PublicZone = value + return b +} + +// WithPrivateZone sets the PrivateZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrivateZone field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithPrivateZone(value *DNSZoneApplyConfiguration) *DNSSpecApplyConfiguration { + b.PrivateZone = value + return b +} + +// WithPlatform sets the Platform field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Platform field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithPlatform(value *DNSPlatformSpecApplyConfiguration) *DNSSpecApplyConfiguration { + b.Platform = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go new file mode 100644 index 0000000000000..39ef2776e53eb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// DNSZoneApplyConfiguration represents a declarative configuration of the DNSZone type for use +// with apply. +type DNSZoneApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +// DNSZoneApplyConfiguration constructs a declarative configuration of the DNSZone type for use with +// apply. +func DNSZone() *DNSZoneApplyConfiguration { + return &DNSZoneApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. 
+func (b *DNSZoneApplyConfiguration) WithID(value string) *DNSZoneApplyConfiguration { + b.ID = &value + return b +} + +// WithTags puts the entries into the Tags field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Tags field, +// overwriting existing map entries in the Tags field with the same key. +func (b *DNSZoneApplyConfiguration) WithTags(entries map[string]string) *DNSZoneApplyConfiguration { + if b.Tags == nil && len(entries) > 0 { + b.Tags = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Tags[k] = v + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go new file mode 100644 index 0000000000000..8e17df603f881 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// EquinixMetalPlatformStatusApplyConfiguration represents a declarative configuration of the EquinixMetalPlatformStatus type for use +// with apply. +type EquinixMetalPlatformStatusApplyConfiguration struct { + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` +} + +// EquinixMetalPlatformStatusApplyConfiguration constructs a declarative configuration of the EquinixMetalPlatformStatus type for use with +// apply. +func EquinixMetalPlatformStatus() *EquinixMetalPlatformStatusApplyConfiguration { + return &EquinixMetalPlatformStatusApplyConfiguration{} +} + +// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerInternalIP field is set to the value of the last call. +func (b *EquinixMetalPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *EquinixMetalPlatformStatusApplyConfiguration { + b.APIServerInternalIP = &value + return b +} + +// WithIngressIP sets the IngressIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IngressIP field is set to the value of the last call. +func (b *EquinixMetalPlatformStatusApplyConfiguration) WithIngressIP(value string) *EquinixMetalPlatformStatusApplyConfiguration { + b.IngressIP = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go new file mode 100644 index 0000000000000..d3b9c17466e22 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go @@ -0,0 +1,34 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExternalIPConfigApplyConfiguration represents a declarative configuration of the ExternalIPConfig type for use +// with apply.
+type ExternalIPConfigApplyConfiguration struct { + Policy *ExternalIPPolicyApplyConfiguration `json:"policy,omitempty"` + AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"` +} + +// ExternalIPConfigApplyConfiguration constructs a declarative configuration of the ExternalIPConfig type for use with +// apply. +func ExternalIPConfig() *ExternalIPConfigApplyConfiguration { + return &ExternalIPConfigApplyConfiguration{} +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. +func (b *ExternalIPConfigApplyConfiguration) WithPolicy(value *ExternalIPPolicyApplyConfiguration) *ExternalIPConfigApplyConfiguration { + b.Policy = value + return b +} + +// WithAutoAssignCIDRs adds the given value to the AutoAssignCIDRs field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AutoAssignCIDRs field. +func (b *ExternalIPConfigApplyConfiguration) WithAutoAssignCIDRs(values ...string) *ExternalIPConfigApplyConfiguration { + for i := range values { + b.AutoAssignCIDRs = append(b.AutoAssignCIDRs, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go new file mode 100644 index 0000000000000..269d934b90cab --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExternalIPPolicyApplyConfiguration represents a declarative configuration of the ExternalIPPolicy type for use +// with apply. +type ExternalIPPolicyApplyConfiguration struct { + AllowedCIDRs []string `json:"allowedCIDRs,omitempty"` + RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"` +} + +// ExternalIPPolicyApplyConfiguration constructs a declarative configuration of the ExternalIPPolicy type for use with +// apply. +func ExternalIPPolicy() *ExternalIPPolicyApplyConfiguration { + return &ExternalIPPolicyApplyConfiguration{} +} + +// WithAllowedCIDRs adds the given value to the AllowedCIDRs field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedCIDRs field. +func (b *ExternalIPPolicyApplyConfiguration) WithAllowedCIDRs(values ...string) *ExternalIPPolicyApplyConfiguration { + for i := range values { + b.AllowedCIDRs = append(b.AllowedCIDRs, values[i]) + } + return b +} + +// WithRejectedCIDRs adds the given value to the RejectedCIDRs field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RejectedCIDRs field.
+func (b *ExternalIPPolicyApplyConfiguration) WithRejectedCIDRs(values ...string) *ExternalIPPolicyApplyConfiguration { + for i := range values { + b.RejectedCIDRs = append(b.RejectedCIDRs, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go new file mode 100644 index 0000000000000..d7640e14298bb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExternalPlatformSpecApplyConfiguration represents a declarative configuration of the ExternalPlatformSpec type for use +// with apply. +type ExternalPlatformSpecApplyConfiguration struct { + PlatformName *string `json:"platformName,omitempty"` +} + +// ExternalPlatformSpecApplyConfiguration constructs a declarative configuration of the ExternalPlatformSpec type for use with +// apply. +func ExternalPlatformSpec() *ExternalPlatformSpecApplyConfiguration { + return &ExternalPlatformSpecApplyConfiguration{} +} + +// WithPlatformName sets the PlatformName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PlatformName field is set to the value of the last call. +func (b *ExternalPlatformSpecApplyConfiguration) WithPlatformName(value string) *ExternalPlatformSpecApplyConfiguration { + b.PlatformName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go new file mode 100644 index 0000000000000..65f8f2b109c2a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExternalPlatformStatusApplyConfiguration represents a declarative configuration of the ExternalPlatformStatus type for use +// with apply. +type ExternalPlatformStatusApplyConfiguration struct { + CloudControllerManager *CloudControllerManagerStatusApplyConfiguration `json:"cloudControllerManager,omitempty"` +} + +// ExternalPlatformStatusApplyConfiguration constructs a declarative configuration of the ExternalPlatformStatus type for use with +// apply. +func ExternalPlatformStatus() *ExternalPlatformStatusApplyConfiguration { + return &ExternalPlatformStatusApplyConfiguration{} +} + +// WithCloudControllerManager sets the CloudControllerManager field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudControllerManager field is set to the value of the last call. 
+func (b *ExternalPlatformStatusApplyConfiguration) WithCloudControllerManager(value *CloudControllerManagerStatusApplyConfiguration) *ExternalPlatformStatusApplyConfiguration { + b.CloudControllerManager = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go new file mode 100644 index 0000000000000..73ec53314771f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// FeatureGateApplyConfiguration represents a declarative configuration of the FeatureGate type for use +// with apply. +type FeatureGateApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FeatureGateSpecApplyConfiguration `json:"spec,omitempty"` + Status *FeatureGateStatusApplyConfiguration `json:"status,omitempty"` +} + +// FeatureGate constructs a declarative configuration of the FeatureGate type for use with +// apply. +func FeatureGate(name string) *FeatureGateApplyConfiguration { + b := &FeatureGateApplyConfiguration{} + b.WithName(name) + b.WithKind("FeatureGate") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractFeatureGate extracts the applied configuration owned by fieldManager from +// featureGate. If no managedFields are found in featureGate for fieldManager, a +// FeatureGateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// featureGate must be an unmodified FeatureGate API object that was retrieved from the Kubernetes API. +// ExtractFeatureGate provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractFeatureGate(featureGate *configv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) { + return extractFeatureGate(featureGate, fieldManager, "") +} + +// ExtractFeatureGateStatus is the same as ExtractFeatureGate except +// that it extracts the status subresource applied configuration. +// Experimental!
+func ExtractFeatureGateStatus(featureGate *configv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) { + return extractFeatureGate(featureGate, fieldManager, "status") +} + +func extractFeatureGate(featureGate *configv1.FeatureGate, fieldManager string, subresource string) (*FeatureGateApplyConfiguration, error) { + b := &FeatureGateApplyConfiguration{} + err := managedfields.ExtractInto(featureGate, internal.Parser().Type("com.github.openshift.api.config.v1.FeatureGate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(featureGate.Name) + + b.WithKind("FeatureGate") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithKind(value string) *FeatureGateApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithAPIVersion(value string) *FeatureGateApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithName(value string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithGenerateName(value string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithNamespace(value string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *FeatureGateApplyConfiguration) WithUID(value types.UID) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithResourceVersion(value string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithGeneration(value int64) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key.
+func (b *FeatureGateApplyConfiguration) WithLabels(entries map[string]string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *FeatureGateApplyConfiguration) WithAnnotations(entries map[string]string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *FeatureGateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *FeatureGateApplyConfiguration) WithFinalizers(values ...string) *FeatureGateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *FeatureGateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *FeatureGateApplyConfiguration) WithSpec(value *FeatureGateSpecApplyConfiguration) *FeatureGateApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithStatus(value *FeatureGateStatusApplyConfiguration) *FeatureGateApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FeatureGateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go new file mode 100644 index 0000000000000..7884ec2872aff --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// FeatureGateAttributesApplyConfiguration represents a declarative configuration of the FeatureGateAttributes type for use +// with apply. +type FeatureGateAttributesApplyConfiguration struct { + Name *configv1.FeatureGateName `json:"name,omitempty"` +} + +// FeatureGateAttributesApplyConfiguration constructs a declarative configuration of the FeatureGateAttributes type for use with +// apply. +func FeatureGateAttributes() *FeatureGateAttributesApplyConfiguration { + return &FeatureGateAttributesApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *FeatureGateAttributesApplyConfiguration) WithName(value configv1.FeatureGateName) *FeatureGateAttributesApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go new file mode 100644 index 0000000000000..c451f74dfdc24 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// FeatureGateDetailsApplyConfiguration represents a declarative configuration of the FeatureGateDetails type for use +// with apply. +type FeatureGateDetailsApplyConfiguration struct { + Version *string `json:"version,omitempty"` + Enabled []FeatureGateAttributesApplyConfiguration `json:"enabled,omitempty"` + Disabled []FeatureGateAttributesApplyConfiguration `json:"disabled,omitempty"` +} + +// FeatureGateDetailsApplyConfiguration constructs a declarative configuration of the FeatureGateDetails type for use with +// apply. +func FeatureGateDetails() *FeatureGateDetailsApplyConfiguration { + return &FeatureGateDetailsApplyConfiguration{} +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. 
+func (b *FeatureGateDetailsApplyConfiguration) WithVersion(value string) *FeatureGateDetailsApplyConfiguration { + b.Version = &value + return b +} + +// WithEnabled adds the given value to the Enabled field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Enabled field. +func (b *FeatureGateDetailsApplyConfiguration) WithEnabled(values ...*FeatureGateAttributesApplyConfiguration) *FeatureGateDetailsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithEnabled") + } + b.Enabled = append(b.Enabled, *values[i]) + } + return b +} + +// WithDisabled adds the given value to the Disabled field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Disabled field. +func (b *FeatureGateDetailsApplyConfiguration) WithDisabled(values ...*FeatureGateAttributesApplyConfiguration) *FeatureGateDetailsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDisabled") + } + b.Disabled = append(b.Disabled, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go new file mode 100644 index 0000000000000..b79d3f883c9b8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// FeatureGateSelectionApplyConfiguration represents a declarative configuration of the FeatureGateSelection type for use +// with apply. +type FeatureGateSelectionApplyConfiguration struct { + FeatureSet *configv1.FeatureSet `json:"featureSet,omitempty"` + CustomNoUpgrade *CustomFeatureGatesApplyConfiguration `json:"customNoUpgrade,omitempty"` +} + +// FeatureGateSelectionApplyConfiguration constructs a declarative configuration of the FeatureGateSelection type for use with +// apply. +func FeatureGateSelection() *FeatureGateSelectionApplyConfiguration { + return &FeatureGateSelectionApplyConfiguration{} +} + +// WithFeatureSet sets the FeatureSet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FeatureSet field is set to the value of the last call. +func (b *FeatureGateSelectionApplyConfiguration) WithFeatureSet(value configv1.FeatureSet) *FeatureGateSelectionApplyConfiguration { + b.FeatureSet = &value + return b +} + +// WithCustomNoUpgrade sets the CustomNoUpgrade field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CustomNoUpgrade field is set to the value of the last call.
+func (b *FeatureGateSelectionApplyConfiguration) WithCustomNoUpgrade(value *CustomFeatureGatesApplyConfiguration) *FeatureGateSelectionApplyConfiguration { + b.CustomNoUpgrade = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go new file mode 100644 index 0000000000000..d7e6f5e2b3e5c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go @@ -0,0 +1,35 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// FeatureGateSpecApplyConfiguration represents a declarative configuration of the FeatureGateSpec type for use +// with apply. +type FeatureGateSpecApplyConfiguration struct { + FeatureGateSelectionApplyConfiguration `json:",inline"` +} + +// FeatureGateSpecApplyConfiguration constructs a declarative configuration of the FeatureGateSpec type for use with +// apply. +func FeatureGateSpec() *FeatureGateSpecApplyConfiguration { + return &FeatureGateSpecApplyConfiguration{} +} + +// WithFeatureSet sets the FeatureSet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FeatureSet field is set to the value of the last call. +func (b *FeatureGateSpecApplyConfiguration) WithFeatureSet(value configv1.FeatureSet) *FeatureGateSpecApplyConfiguration { + b.FeatureGateSelectionApplyConfiguration.FeatureSet = &value + return b +} + +// WithCustomNoUpgrade sets the CustomNoUpgrade field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CustomNoUpgrade field is set to the value of the last call. +func (b *FeatureGateSpecApplyConfiguration) WithCustomNoUpgrade(value *CustomFeatureGatesApplyConfiguration) *FeatureGateSpecApplyConfiguration { + b.FeatureGateSelectionApplyConfiguration.CustomNoUpgrade = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go new file mode 100644 index 0000000000000..705c3d0cffc25 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// FeatureGateStatusApplyConfiguration represents a declarative configuration of the FeatureGateStatus type for use +// with apply. +type FeatureGateStatusApplyConfiguration struct { + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + FeatureGates []FeatureGateDetailsApplyConfiguration `json:"featureGates,omitempty"` +} + +// FeatureGateStatusApplyConfiguration constructs a declarative configuration of the FeatureGateStatus type for use with +// apply. 
+func FeatureGateStatus() *FeatureGateStatusApplyConfiguration { + return &FeatureGateStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *FeatureGateStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithFeatureGates adds the given value to the FeatureGates field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the FeatureGates field. +func (b *FeatureGateStatusApplyConfiguration) WithFeatureGates(values ...*FeatureGateDetailsApplyConfiguration) *FeatureGateStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithFeatureGates") + } + b.FeatureGates = append(b.FeatureGates, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go new file mode 100644 index 0000000000000..9c28888cf9a46 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go @@ -0,0 +1,69 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPPlatformStatusApplyConfiguration represents a declarative configuration of the GCPPlatformStatus type for use +// with apply. +type GCPPlatformStatusApplyConfiguration struct { + ProjectID *string `json:"projectID,omitempty"` + Region *string `json:"region,omitempty"` + ResourceLabels []GCPResourceLabelApplyConfiguration `json:"resourceLabels,omitempty"` + ResourceTags []GCPResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` +} + +// GCPPlatformStatusApplyConfiguration constructs a declarative configuration of the GCPPlatformStatus type for use with +// apply. +func GCPPlatformStatus() *GCPPlatformStatusApplyConfiguration { + return &GCPPlatformStatusApplyConfiguration{} +} + +// WithProjectID sets the ProjectID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectID field is set to the value of the last call. +func (b *GCPPlatformStatusApplyConfiguration) WithProjectID(value string) *GCPPlatformStatusApplyConfiguration { + b.ProjectID = &value + return b +} + +// WithRegion sets the Region field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Region field is set to the value of the last call.
+func (b *GCPPlatformStatusApplyConfiguration) WithRegion(value string) *GCPPlatformStatusApplyConfiguration { + b.Region = &value + return b +} + +// WithResourceLabels adds the given value to the ResourceLabels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceLabels field. +func (b *GCPPlatformStatusApplyConfiguration) WithResourceLabels(values ...*GCPResourceLabelApplyConfiguration) *GCPPlatformStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceLabels") + } + b.ResourceLabels = append(b.ResourceLabels, *values[i]) + } + return b +} + +// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceTags field. +func (b *GCPPlatformStatusApplyConfiguration) WithResourceTags(values ...*GCPResourceTagApplyConfiguration) *GCPPlatformStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceTags") + } + b.ResourceTags = append(b.ResourceTags, *values[i]) + } + return b +} + +// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call. +func (b *GCPPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *GCPPlatformStatusApplyConfiguration { + b.CloudLoadBalancerConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go new file mode 100644 index 0000000000000..5d408e45ede9c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPResourceLabelApplyConfiguration represents a declarative configuration of the GCPResourceLabel type for use +// with apply. +type GCPResourceLabelApplyConfiguration struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` +} + +// GCPResourceLabelApplyConfiguration constructs a declarative configuration of the GCPResourceLabel type for use with +// apply. +func GCPResourceLabel() *GCPResourceLabelApplyConfiguration { + return &GCPResourceLabelApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *GCPResourceLabelApplyConfiguration) WithKey(value string) *GCPResourceLabelApplyConfiguration { + b.Key = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call. +func (b *GCPResourceLabelApplyConfiguration) WithValue(value string) *GCPResourceLabelApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go new file mode 100644 index 0000000000000..8f22d3a54edd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPResourceTagApplyConfiguration represents a declarative configuration of the GCPResourceTag type for use +// with apply. +type GCPResourceTagApplyConfiguration struct { + ParentID *string `json:"parentID,omitempty"` + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` +} + +// GCPResourceTagApplyConfiguration constructs a declarative configuration of the GCPResourceTag type for use with +// apply. +func GCPResourceTag() *GCPResourceTagApplyConfiguration { + return &GCPResourceTagApplyConfiguration{} +} + +// WithParentID sets the ParentID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentID field is set to the value of the last call. +func (b *GCPResourceTagApplyConfiguration) WithParentID(value string) *GCPResourceTagApplyConfiguration { + b.ParentID = &value + return b +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *GCPResourceTagApplyConfiguration) WithKey(value string) *GCPResourceTagApplyConfiguration { + b.Key = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *GCPResourceTagApplyConfiguration) WithValue(value string) *GCPResourceTagApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go new file mode 100644 index 0000000000000..c797463d3691f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go @@ -0,0 +1,72 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitHubIdentityProviderApplyConfiguration represents a declarative configuration of the GitHubIdentityProvider type for use +// with apply. 
+type GitHubIdentityProviderApplyConfiguration struct {
+	ClientID      *string                                   `json:"clientID,omitempty"`
+	ClientSecret  *SecretNameReferenceApplyConfiguration    `json:"clientSecret,omitempty"`
+	Organizations []string                                  `json:"organizations,omitempty"`
+	Teams         []string                                  `json:"teams,omitempty"`
+	Hostname      *string                                   `json:"hostname,omitempty"`
+	CA            *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+}
+
+// GitHubIdentityProviderApplyConfiguration constructs a declarative configuration of the GitHubIdentityProvider type for use with
+// apply.
+func GitHubIdentityProvider() *GitHubIdentityProviderApplyConfiguration {
+	return &GitHubIdentityProviderApplyConfiguration{}
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithClientID(value string) *GitHubIdentityProviderApplyConfiguration {
+	b.ClientID = &value
+	return b
+}
+
+// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientSecret field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *GitHubIdentityProviderApplyConfiguration {
+	b.ClientSecret = value
+	return b
+}
+
+// WithOrganizations adds the given value to the Organizations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Organizations field.
+func (b *GitHubIdentityProviderApplyConfiguration) WithOrganizations(values ...string) *GitHubIdentityProviderApplyConfiguration {
+	for i := range values {
+		b.Organizations = append(b.Organizations, values[i])
+	}
+	return b
+}
+
+// WithTeams adds the given value to the Teams field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Teams field.
+func (b *GitHubIdentityProviderApplyConfiguration) WithTeams(values ...string) *GitHubIdentityProviderApplyConfiguration {
+	for i := range values {
+		b.Teams = append(b.Teams, values[i])
+	}
+	return b
+}
+
+// WithHostname sets the Hostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Hostname field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithHostname(value string) *GitHubIdentityProviderApplyConfiguration {
+	b.Hostname = &value
+	return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
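Taken together, these generated setters support fluent construction: every "With" method returns its receiver. A minimal usage sketch follows; the configv1ac import alias and the SecretNameReference builder are assumptions based on the surrounding generated package, not part of this diff.

package example

import (
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// buildGitHubIdP assembles a GitHubIdentityProvider apply configuration by
// chaining setters. Scalar setters (WithClientID, WithHostname) keep only the
// last value, while WithOrganizations appends across calls.
func buildGitHubIdP() *configv1ac.GitHubIdentityProviderApplyConfiguration {
	return configv1ac.GitHubIdentityProvider().
		WithClientID("my-client-id").
		// SecretNameReference() is assumed to be the sibling generated builder
		// for the SecretNameReferenceApplyConfiguration accepted by WithClientSecret.
		WithClientSecret(configv1ac.SecretNameReference().WithName("github-client-secret")).
		WithOrganizations("org-a", "org-b").
		WithHostname("github.example.com")
}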
+func (b *GitHubIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *GitHubIdentityProviderApplyConfiguration { + b.CA = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go new file mode 100644 index 0000000000000..e6a542e1c0e40 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitLabIdentityProviderApplyConfiguration represents a declarative configuration of the GitLabIdentityProvider type for use +// with apply. +type GitLabIdentityProviderApplyConfiguration struct { + ClientID *string `json:"clientID,omitempty"` + ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"` + URL *string `json:"url,omitempty"` + CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` +} + +// GitLabIdentityProviderApplyConfiguration constructs a declarative configuration of the GitLabIdentityProvider type for use with +// apply. +func GitLabIdentityProvider() *GitLabIdentityProviderApplyConfiguration { + return &GitLabIdentityProviderApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *GitLabIdentityProviderApplyConfiguration) WithClientID(value string) *GitLabIdentityProviderApplyConfiguration { + b.ClientID = &value + return b +} + +// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientSecret field is set to the value of the last call. +func (b *GitLabIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *GitLabIdentityProviderApplyConfiguration { + b.ClientSecret = value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *GitLabIdentityProviderApplyConfiguration) WithURL(value string) *GitLabIdentityProviderApplyConfiguration { + b.URL = &value + return b +} + +// WithCA sets the CA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CA field is set to the value of the last call. 
+func (b *GitLabIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *GitLabIdentityProviderApplyConfiguration { + b.CA = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go new file mode 100644 index 0000000000000..d828680696519 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GoogleIdentityProviderApplyConfiguration represents a declarative configuration of the GoogleIdentityProvider type for use +// with apply. +type GoogleIdentityProviderApplyConfiguration struct { + ClientID *string `json:"clientID,omitempty"` + ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"` + HostedDomain *string `json:"hostedDomain,omitempty"` +} + +// GoogleIdentityProviderApplyConfiguration constructs a declarative configuration of the GoogleIdentityProvider type for use with +// apply. +func GoogleIdentityProvider() *GoogleIdentityProviderApplyConfiguration { + return &GoogleIdentityProviderApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *GoogleIdentityProviderApplyConfiguration) WithClientID(value string) *GoogleIdentityProviderApplyConfiguration { + b.ClientID = &value + return b +} + +// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientSecret field is set to the value of the last call. +func (b *GoogleIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *GoogleIdentityProviderApplyConfiguration { + b.ClientSecret = value + return b +} + +// WithHostedDomain sets the HostedDomain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostedDomain field is set to the value of the last call. +func (b *GoogleIdentityProviderApplyConfiguration) WithHostedDomain(value string) *GoogleIdentityProviderApplyConfiguration { + b.HostedDomain = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go new file mode 100644 index 0000000000000..f5c689bbe280d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HTPasswdIdentityProviderApplyConfiguration represents a declarative configuration of the HTPasswdIdentityProvider type for use +// with apply. 
+type HTPasswdIdentityProviderApplyConfiguration struct { + FileData *SecretNameReferenceApplyConfiguration `json:"fileData,omitempty"` +} + +// HTPasswdIdentityProviderApplyConfiguration constructs a declarative configuration of the HTPasswdIdentityProvider type for use with +// apply. +func HTPasswdIdentityProvider() *HTPasswdIdentityProviderApplyConfiguration { + return &HTPasswdIdentityProviderApplyConfiguration{} +} + +// WithFileData sets the FileData field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FileData field is set to the value of the last call. +func (b *HTPasswdIdentityProviderApplyConfiguration) WithFileData(value *SecretNameReferenceApplyConfiguration) *HTPasswdIdentityProviderApplyConfiguration { + b.FileData = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go new file mode 100644 index 0000000000000..333802bfecede --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HubSourceApplyConfiguration represents a declarative configuration of the HubSource type for use +// with apply. +type HubSourceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Disabled *bool `json:"disabled,omitempty"` +} + +// HubSourceApplyConfiguration constructs a declarative configuration of the HubSource type for use with +// apply. +func HubSource() *HubSourceApplyConfiguration { + return &HubSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *HubSourceApplyConfiguration) WithName(value string) *HubSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithDisabled sets the Disabled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Disabled field is set to the value of the last call. +func (b *HubSourceApplyConfiguration) WithDisabled(value bool) *HubSourceApplyConfiguration { + b.Disabled = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go new file mode 100644 index 0000000000000..1688b1ce41cd9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go @@ -0,0 +1,57 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HubSourceStatusApplyConfiguration represents a declarative configuration of the HubSourceStatus type for use +// with apply. +type HubSourceStatusApplyConfiguration struct { + *HubSourceApplyConfiguration `json:"HubSource,omitempty"` + Status *string `json:"status,omitempty"` + Message *string `json:"message,omitempty"` +} + +// HubSourceStatusApplyConfiguration constructs a declarative configuration of the HubSourceStatus type for use with +// apply. 
+func HubSourceStatus() *HubSourceStatusApplyConfiguration { + return &HubSourceStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *HubSourceStatusApplyConfiguration) WithName(value string) *HubSourceStatusApplyConfiguration { + b.ensureHubSourceApplyConfigurationExists() + b.HubSourceApplyConfiguration.Name = &value + return b +} + +// WithDisabled sets the Disabled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Disabled field is set to the value of the last call. +func (b *HubSourceStatusApplyConfiguration) WithDisabled(value bool) *HubSourceStatusApplyConfiguration { + b.ensureHubSourceApplyConfigurationExists() + b.HubSourceApplyConfiguration.Disabled = &value + return b +} + +func (b *HubSourceStatusApplyConfiguration) ensureHubSourceApplyConfigurationExists() { + if b.HubSourceApplyConfiguration == nil { + b.HubSourceApplyConfiguration = &HubSourceApplyConfiguration{} + } +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *HubSourceStatusApplyConfiguration) WithStatus(value string) *HubSourceStatusApplyConfiguration { + b.Status = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *HubSourceStatusApplyConfiguration) WithMessage(value string) *HubSourceStatusApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go new file mode 100644 index 0000000000000..48c17c9cb5672 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go @@ -0,0 +1,77 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// IBMCloudPlatformStatusApplyConfiguration represents a declarative configuration of the IBMCloudPlatformStatus type for use +// with apply. +type IBMCloudPlatformStatusApplyConfiguration struct { + Location *string `json:"location,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + ProviderType *configv1.IBMCloudProviderType `json:"providerType,omitempty"` + CISInstanceCRN *string `json:"cisInstanceCRN,omitempty"` + DNSInstanceCRN *string `json:"dnsInstanceCRN,omitempty"` + ServiceEndpoints []IBMCloudServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` +} + +// IBMCloudPlatformStatusApplyConfiguration constructs a declarative configuration of the IBMCloudPlatformStatus type for use with +// apply. 
+func IBMCloudPlatformStatus() *IBMCloudPlatformStatusApplyConfiguration {
+	return &IBMCloudPlatformStatusApplyConfiguration{}
+}
+
+// WithLocation sets the Location field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Location field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithLocation(value string) *IBMCloudPlatformStatusApplyConfiguration {
+	b.Location = &value
+	return b
+}
+
+// WithResourceGroupName sets the ResourceGroupName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroupName field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithResourceGroupName(value string) *IBMCloudPlatformStatusApplyConfiguration {
+	b.ResourceGroupName = &value
+	return b
+}
+
+// WithProviderType sets the ProviderType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProviderType field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithProviderType(value configv1.IBMCloudProviderType) *IBMCloudPlatformStatusApplyConfiguration {
+	b.ProviderType = &value
+	return b
+}
+
+// WithCISInstanceCRN sets the CISInstanceCRN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CISInstanceCRN field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithCISInstanceCRN(value string) *IBMCloudPlatformStatusApplyConfiguration {
+	b.CISInstanceCRN = &value
+	return b
+}
+
+// WithDNSInstanceCRN sets the DNSInstanceCRN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DNSInstanceCRN field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithDNSInstanceCRN(value string) *IBMCloudPlatformStatusApplyConfiguration {
+	b.DNSInstanceCRN = &value
+	return b
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
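The slice-valued builders differ from the scalar setters: they append on every call and panic on nil elements, so entries are usually constructed inline. A short sketch under the same import assumptions as the earlier example; "IAM" stands in for any configv1.IBMCloudServiceName value.

package example

import (
	configv1 "github.com/openshift/api/config/v1"
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// ibmStatusSketch appends one service endpoint; passing a nil element to
// WithServiceEndpoints would panic, so the entry is built inline.
func ibmStatusSketch() *configv1ac.IBMCloudPlatformStatusApplyConfiguration {
	return configv1ac.IBMCloudPlatformStatus().
		WithLocation("us-south").
		WithServiceEndpoints(
			configv1ac.IBMCloudServiceEndpoint().
				// The service name here is illustrative, not a vetted constant.
				WithName(configv1.IBMCloudServiceName("IAM")).
				WithURL("https://private.iam.cloud.ibm.com"),
		)
}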
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*IBMCloudServiceEndpointApplyConfiguration) *IBMCloudPlatformStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithServiceEndpoints") + } + b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go new file mode 100644 index 0000000000000..daec88ba5d486 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// IBMCloudServiceEndpointApplyConfiguration represents a declarative configuration of the IBMCloudServiceEndpoint type for use +// with apply. +type IBMCloudServiceEndpointApplyConfiguration struct { + Name *configv1.IBMCloudServiceName `json:"name,omitempty"` + URL *string `json:"url,omitempty"` +} + +// IBMCloudServiceEndpointApplyConfiguration constructs a declarative configuration of the IBMCloudServiceEndpoint type for use with +// apply. +func IBMCloudServiceEndpoint() *IBMCloudServiceEndpointApplyConfiguration { + return &IBMCloudServiceEndpointApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IBMCloudServiceEndpointApplyConfiguration) WithName(value configv1.IBMCloudServiceName) *IBMCloudServiceEndpointApplyConfiguration { + b.Name = &value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *IBMCloudServiceEndpointApplyConfiguration) WithURL(value string) *IBMCloudServiceEndpointApplyConfiguration { + b.URL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go new file mode 100644 index 0000000000000..4e726d0859b0d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go @@ -0,0 +1,117 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// IdentityProviderApplyConfiguration represents a declarative configuration of the IdentityProvider type for use +// with apply. +type IdentityProviderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + MappingMethod *configv1.MappingMethodType `json:"mappingMethod,omitempty"` + IdentityProviderConfigApplyConfiguration `json:",inline"` +} + +// IdentityProviderApplyConfiguration constructs a declarative configuration of the IdentityProvider type for use with +// apply. 
+func IdentityProvider() *IdentityProviderApplyConfiguration { + return &IdentityProviderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithName(value string) *IdentityProviderApplyConfiguration { + b.Name = &value + return b +} + +// WithMappingMethod sets the MappingMethod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MappingMethod field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithMappingMethod(value configv1.MappingMethodType) *IdentityProviderApplyConfiguration { + b.MappingMethod = &value + return b +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithType(value configv1.IdentityProviderType) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.Type = &value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithBasicAuth(value *BasicAuthIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.BasicAuth = value + return b +} + +// WithGitHub sets the GitHub field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitHub field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithGitHub(value *GitHubIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.GitHub = value + return b +} + +// WithGitLab sets the GitLab field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitLab field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithGitLab(value *GitLabIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.GitLab = value + return b +} + +// WithGoogle sets the Google field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Google field is set to the value of the last call. 
+func (b *IdentityProviderApplyConfiguration) WithGoogle(value *GoogleIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.Google = value + return b +} + +// WithHTPasswd sets the HTPasswd field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTPasswd field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithHTPasswd(value *HTPasswdIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.HTPasswd = value + return b +} + +// WithKeystone sets the Keystone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Keystone field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithKeystone(value *KeystoneIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.Keystone = value + return b +} + +// WithLDAP sets the LDAP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LDAP field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithLDAP(value *LDAPIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.LDAP = value + return b +} + +// WithOpenID sets the OpenID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenID field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithOpenID(value *OpenIDIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.OpenID = value + return b +} + +// WithRequestHeader sets the RequestHeader field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequestHeader field is set to the value of the last call. +func (b *IdentityProviderApplyConfiguration) WithRequestHeader(value *RequestHeaderIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration { + b.IdentityProviderConfigApplyConfiguration.RequestHeader = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go new file mode 100644 index 0000000000000..1ff6d99a7b805 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go @@ -0,0 +1,108 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// IdentityProviderConfigApplyConfiguration represents a declarative configuration of the IdentityProviderConfig type for use +// with apply. 
+type IdentityProviderConfigApplyConfiguration struct { + Type *configv1.IdentityProviderType `json:"type,omitempty"` + BasicAuth *BasicAuthIdentityProviderApplyConfiguration `json:"basicAuth,omitempty"` + GitHub *GitHubIdentityProviderApplyConfiguration `json:"github,omitempty"` + GitLab *GitLabIdentityProviderApplyConfiguration `json:"gitlab,omitempty"` + Google *GoogleIdentityProviderApplyConfiguration `json:"google,omitempty"` + HTPasswd *HTPasswdIdentityProviderApplyConfiguration `json:"htpasswd,omitempty"` + Keystone *KeystoneIdentityProviderApplyConfiguration `json:"keystone,omitempty"` + LDAP *LDAPIdentityProviderApplyConfiguration `json:"ldap,omitempty"` + OpenID *OpenIDIdentityProviderApplyConfiguration `json:"openID,omitempty"` + RequestHeader *RequestHeaderIdentityProviderApplyConfiguration `json:"requestHeader,omitempty"` +} + +// IdentityProviderConfigApplyConfiguration constructs a declarative configuration of the IdentityProviderConfig type for use with +// apply. +func IdentityProviderConfig() *IdentityProviderConfigApplyConfiguration { + return &IdentityProviderConfigApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithType(value configv1.IdentityProviderType) *IdentityProviderConfigApplyConfiguration { + b.Type = &value + return b +} + +// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BasicAuth field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithBasicAuth(value *BasicAuthIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.BasicAuth = value + return b +} + +// WithGitHub sets the GitHub field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitHub field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithGitHub(value *GitHubIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.GitHub = value + return b +} + +// WithGitLab sets the GitLab field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GitLab field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithGitLab(value *GitLabIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.GitLab = value + return b +} + +// WithGoogle sets the Google field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Google field is set to the value of the last call. 
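IdentityProviderConfig acts as a discriminated union: Type names the provider kind and exactly one matching provider field is normally populated alongside it. A hedged sketch of pairing the discriminator with its provider field via the IdentityProvider wrapper above; the "HTPasswd" and "claim" string literals mirror the openshift/api constants rather than quoting them directly.

package example

import (
	configv1 "github.com/openshift/api/config/v1"
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// htpasswdIdP sets the union discriminator (Type) together with the one
// provider field it selects; SecretNameReference() is again assumed from the
// generated package's naming convention.
func htpasswdIdP() *configv1ac.IdentityProviderApplyConfiguration {
	return configv1ac.IdentityProvider().
		WithName("local-users").
		WithMappingMethod(configv1.MappingMethodType("claim")).
		WithType(configv1.IdentityProviderType("HTPasswd")).
		WithHTPasswd(configv1ac.HTPasswdIdentityProvider().
			WithFileData(configv1ac.SecretNameReference().WithName("htpasswd-secret")))
}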
+func (b *IdentityProviderConfigApplyConfiguration) WithGoogle(value *GoogleIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.Google = value + return b +} + +// WithHTPasswd sets the HTPasswd field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTPasswd field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithHTPasswd(value *HTPasswdIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.HTPasswd = value + return b +} + +// WithKeystone sets the Keystone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Keystone field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithKeystone(value *KeystoneIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.Keystone = value + return b +} + +// WithLDAP sets the LDAP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LDAP field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithLDAP(value *LDAPIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.LDAP = value + return b +} + +// WithOpenID sets the OpenID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenID field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithOpenID(value *OpenIDIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.OpenID = value + return b +} + +// WithRequestHeader sets the RequestHeader field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequestHeader field is set to the value of the last call. +func (b *IdentityProviderConfigApplyConfiguration) WithRequestHeader(value *RequestHeaderIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration { + b.RequestHeader = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go new file mode 100644 index 0000000000000..63009029ec175 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageApplyConfiguration represents a declarative configuration of the Image type for use +// with apply. 
+type ImageApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ImageSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *ImageStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Image constructs a declarative configuration of the Image type for use with
+// apply.
+func Image(name string) *ImageApplyConfiguration {
+	b := &ImageApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Image")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractImage extracts the applied configuration owned by fieldManager from
+// image. If no managedFields are found in image for fieldManager, an
+// ImageApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// image must be an unmodified Image API object that was retrieved from the Kubernetes API.
+// ExtractImage provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImage(image *configv1.Image, fieldManager string) (*ImageApplyConfiguration, error) {
+	return extractImage(image, fieldManager, "")
+}
+
+// ExtractImageStatus is the same as ExtractImage except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageStatus(image *configv1.Image, fieldManager string) (*ImageApplyConfiguration, error) {
+	return extractImage(image, fieldManager, "status")
+}
+
+func extractImage(image *configv1.Image, fieldManager string, subresource string) (*ImageApplyConfiguration, error) {
+	b := &ImageApplyConfiguration{}
+	err := managedfields.ExtractInto(image, internal.Parser().Type("com.github.openshift.api.config.v1.Image"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(image.Name)
+
+	b.WithKind("Image")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithKind(value string) *ImageApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithAPIVersion(value string) *ImageApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
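ExtractImage enables the extract/modify-in-place/apply pattern the comment above describes. A sketch of the first two steps; the final server-side apply call (for example a typed clientset's Images().Apply) is assumed to exist elsewhere in this vendor tree and is not shown.

package example

import (
	configv1 "github.com/openshift/api/config/v1"
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// reapplyImage extracts the apply configuration owned by fieldManager from a
// live Image, mutates one field, and returns the configuration ready to be
// passed to a server-side apply call with the same field manager.
func reapplyImage(live *configv1.Image, fieldManager string) (*configv1ac.ImageApplyConfiguration, error) {
	ac, err := configv1ac.ExtractImage(live, fieldManager)
	if err != nil {
		return nil, err
	}
	// Only fields previously owned by fieldManager are present in ac; the
	// annotation added here (an illustrative key) extends that owned set.
	ac.WithAnnotations(map[string]string{"example.io/touched-by": fieldManager})
	return ac, nil
}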
+func (b *ImageApplyConfiguration) WithName(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithGenerateName(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithNamespace(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithUID(value types.UID) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithResourceVersion(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithGeneration(value int64) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ImageApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ImageApplyConfiguration) WithLabels(entries map[string]string) *ImageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageApplyConfiguration) WithAnnotations(entries map[string]string) *ImageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
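Note the differing semantics among these metadata builders: WithLabels and WithAnnotations merge map entries across calls, with later calls winning on colliding keys, while WithFinalizers and WithOwnerReferences append. A small illustration using only the methods shown above:

package example

import (
	configv1ac "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// labelMergeSketch ends up with labels {tier: backend, stage: prod}: the
// second WithLabels call overwrites the colliding "stage" key but keeps the
// rest, whereas WithFinalizers simply appends its (illustrative) value.
func labelMergeSketch() *configv1ac.ImageApplyConfiguration {
	return configv1ac.Image("cluster").
		WithLabels(map[string]string{"tier": "backend", "stage": "dev"}).
		WithLabels(map[string]string{"stage": "prod"}).
		WithFinalizers("example.io/cleanup")
}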
+func (b *ImageApplyConfiguration) WithFinalizers(values ...string) *ImageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ImageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithSpec(value *ImageSpecApplyConfiguration) *ImageApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithStatus(value *ImageStatusApplyConfiguration) *ImageApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ImageApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go
new file mode 100644
index 0000000000000..6d47fac038ae9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go
@@ -0,0 +1,237 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageContentPolicyApplyConfiguration represents a declarative configuration of the ImageContentPolicy type for use
+// with apply.
+type ImageContentPolicyApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ImageContentPolicySpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ImageContentPolicy constructs a declarative configuration of the ImageContentPolicy type for use with
+// apply.
+func ImageContentPolicy(name string) *ImageContentPolicyApplyConfiguration {
+	b := &ImageContentPolicyApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ImageContentPolicy")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractImageContentPolicy extracts the applied configuration owned by fieldManager from
+// imageContentPolicy. If no managedFields are found in imageContentPolicy for fieldManager, an
+// ImageContentPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// imageContentPolicy must be an unmodified ImageContentPolicy API object that was retrieved from the Kubernetes API.
+// ExtractImageContentPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageContentPolicy(imageContentPolicy *configv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) {
+	return extractImageContentPolicy(imageContentPolicy, fieldManager, "")
+}
+
+// ExtractImageContentPolicyStatus is the same as ExtractImageContentPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageContentPolicyStatus(imageContentPolicy *configv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) {
+	return extractImageContentPolicy(imageContentPolicy, fieldManager, "status")
+}
+
+func extractImageContentPolicy(imageContentPolicy *configv1.ImageContentPolicy, fieldManager string, subresource string) (*ImageContentPolicyApplyConfiguration, error) {
+	b := &ImageContentPolicyApplyConfiguration{}
+	err := managedfields.ExtractInto(imageContentPolicy, internal.Parser().Type("com.github.openshift.api.config.v1.ImageContentPolicy"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(imageContentPolicy.Name)
+
+	b.WithKind("ImageContentPolicy")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithKind(value string) *ImageContentPolicyApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithAPIVersion(value string) *ImageContentPolicyApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithName(value string) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageContentPolicyApplyConfiguration) WithGenerateName(value string) *ImageContentPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageContentPolicyApplyConfiguration) WithNamespace(value string) *ImageContentPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageContentPolicyApplyConfiguration) WithUID(value types.UID) *ImageContentPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageContentPolicyApplyConfiguration) WithResourceVersion(value string) *ImageContentPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageContentPolicyApplyConfiguration) WithGeneration(value int64) *ImageContentPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageContentPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageContentPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ImageContentPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ImageContentPolicyApplyConfiguration) WithLabels(entries map[string]string) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageContentPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageContentPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
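+// Successive WithLabels calls merge rather than replace; an illustrative
+// sketch using only builders from this file (label keys and values are made
+// up for the example):
+//
+//	b := ImageContentPolicy("example").
+//		WithLabels(map[string]string{"tier": "infra"}).
+//		WithLabels(map[string]string{"owner": "platform"})
+//	// b now carries both labels; repeating a key keeps the last value.
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.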
+func (b *ImageContentPolicyApplyConfiguration) WithFinalizers(values ...string) *ImageContentPolicyApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ImageContentPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithSpec(value *ImageContentPolicySpecApplyConfiguration) *ImageContentPolicyApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ImageContentPolicyApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go
new file mode 100644
index 0000000000000..ea674157cb5f2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageContentPolicySpecApplyConfiguration represents a declarative configuration of the ImageContentPolicySpec type for use
+// with apply.
+type ImageContentPolicySpecApplyConfiguration struct {
+	RepositoryDigestMirrors []RepositoryDigestMirrorsApplyConfiguration `json:"repositoryDigestMirrors,omitempty"`
+}
+
+// ImageContentPolicySpecApplyConfiguration constructs a declarative configuration of the ImageContentPolicySpec type for use with
+// apply.
+func ImageContentPolicySpec() *ImageContentPolicySpecApplyConfiguration {
+	return &ImageContentPolicySpecApplyConfiguration{}
+}
+
+// WithRepositoryDigestMirrors adds the given value to the RepositoryDigestMirrors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RepositoryDigestMirrors field.
+func (b *ImageContentPolicySpecApplyConfiguration) WithRepositoryDigestMirrors(values ...*RepositoryDigestMirrorsApplyConfiguration) *ImageContentPolicySpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithRepositoryDigestMirrors")
+		}
+		b.RepositoryDigestMirrors = append(b.RepositoryDigestMirrors, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go
new file mode 100644
index 0000000000000..d6c57cb7f56a6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// ImageDigestMirrorsApplyConfiguration represents a declarative configuration of the ImageDigestMirrors type for use
+// with apply.
+type ImageDigestMirrorsApplyConfiguration struct {
+	Source             *string                      `json:"source,omitempty"`
+	Mirrors            []configv1.ImageMirror       `json:"mirrors,omitempty"`
+	MirrorSourcePolicy *configv1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
+
+// ImageDigestMirrorsApplyConfiguration constructs a declarative configuration of the ImageDigestMirrors type for use with
+// apply.
+func ImageDigestMirrors() *ImageDigestMirrorsApplyConfiguration {
+	return &ImageDigestMirrorsApplyConfiguration{}
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *ImageDigestMirrorsApplyConfiguration) WithSource(value string) *ImageDigestMirrorsApplyConfiguration {
+	b.Source = &value
+	return b
+}
+
+// WithMirrors adds the given value to the Mirrors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mirrors field.
+func (b *ImageDigestMirrorsApplyConfiguration) WithMirrors(values ...configv1.ImageMirror) *ImageDigestMirrorsApplyConfiguration {
+	for i := range values {
+		b.Mirrors = append(b.Mirrors, values[i])
+	}
+	return b
+}
+
+// WithMirrorSourcePolicy sets the MirrorSourcePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MirrorSourcePolicy field is set to the value of the last call.
+func (b *ImageDigestMirrorsApplyConfiguration) WithMirrorSourcePolicy(value configv1.MirrorSourcePolicy) *ImageDigestMirrorsApplyConfiguration {
+	b.MirrorSourcePolicy = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go
new file mode 100644
index 0000000000000..f3c5ca21a4025
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
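+// An illustrative sketch of the ImageDigestMirrors builder from the previous
+// file (registry names are made up; configv1.ImageMirror is a string type,
+// so untyped string literals convert implicitly):
+//
+//	m := ImageDigestMirrors().
+//		WithSource("registry.example.com/team/app").
+//		WithMirrors("mirror-a.example.com/team/app", "mirror-b.example.com/team/app")
+
+// ImageDigestMirrorSetApplyConfiguration represents a declarative configuration of the ImageDigestMirrorSet type for use
+// with apply.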
+type ImageDigestMirrorSetApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ImageDigestMirrorSetSpecApplyConfiguration `json:"spec,omitempty"`
+	Status                               *configv1.ImageDigestMirrorSetStatus        `json:"status,omitempty"`
+}
+
+// ImageDigestMirrorSet constructs a declarative configuration of the ImageDigestMirrorSet type for use with
+// apply.
+func ImageDigestMirrorSet(name string) *ImageDigestMirrorSetApplyConfiguration {
+	b := &ImageDigestMirrorSetApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ImageDigestMirrorSet")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractImageDigestMirrorSet extracts the applied configuration owned by fieldManager from
+// imageDigestMirrorSet. If no managedFields are found in imageDigestMirrorSet for fieldManager, an
+// ImageDigestMirrorSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// imageDigestMirrorSet must be an unmodified ImageDigestMirrorSet API object that was retrieved from the Kubernetes API.
+// ExtractImageDigestMirrorSet provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageDigestMirrorSet(imageDigestMirrorSet *configv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) {
+	return extractImageDigestMirrorSet(imageDigestMirrorSet, fieldManager, "")
+}
+
+// ExtractImageDigestMirrorSetStatus is the same as ExtractImageDigestMirrorSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageDigestMirrorSetStatus(imageDigestMirrorSet *configv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) {
+	return extractImageDigestMirrorSet(imageDigestMirrorSet, fieldManager, "status")
+}
+
+func extractImageDigestMirrorSet(imageDigestMirrorSet *configv1.ImageDigestMirrorSet, fieldManager string, subresource string) (*ImageDigestMirrorSetApplyConfiguration, error) {
+	b := &ImageDigestMirrorSetApplyConfiguration{}
+	err := managedfields.ExtractInto(imageDigestMirrorSet, internal.Parser().Type("com.github.openshift.api.config.v1.ImageDigestMirrorSet"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(imageDigestMirrorSet.Name)
+
+	b.WithKind("ImageDigestMirrorSet")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
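+// A compact, illustrative sketch of assembling a full apply configuration
+// from the builders in this package (the name and registries are made up):
+//
+//	set := ImageDigestMirrorSet("example").
+//		WithSpec(ImageDigestMirrorSetSpec().
+//			WithImageDigestMirrors(ImageDigestMirrors().
+//				WithSource("registry.example.com/team/app").
+//				WithMirrors("mirror.example.com/team/app")))
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.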
+func (b *ImageDigestMirrorSetApplyConfiguration) WithKind(value string) *ImageDigestMirrorSetApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageDigestMirrorSetApplyConfiguration) WithAPIVersion(value string) *ImageDigestMirrorSetApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageDigestMirrorSetApplyConfiguration) WithName(value string) *ImageDigestMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageDigestMirrorSetApplyConfiguration) WithGenerateName(value string) *ImageDigestMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageDigestMirrorSetApplyConfiguration) WithNamespace(value string) *ImageDigestMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageDigestMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageDigestMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageDigestMirrorSetApplyConfiguration) WithResourceVersion(value string) *ImageDigestMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ImageDigestMirrorSetApplyConfiguration) WithGeneration(value int64) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithLabels(entries map[string]string) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithAnnotations(entries map[string]string) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithFinalizers(values ...string) *ImageDigestMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ImageDigestMirrorSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithSpec(value *ImageDigestMirrorSetSpecApplyConfiguration) *ImageDigestMirrorSetApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value configv1.ImageDigestMirrorSetStatus) *ImageDigestMirrorSetApplyConfiguration {
+	b.Status = &value
+	return b
+}
+
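+// Note the asymmetry above: WithSpec takes a pointer to a builder, while
+// WithStatus takes the configv1 status struct by value. An illustrative
+// pairing (the status literal is empty purely for the sketch):
+//
+//	s := ImageDigestMirrorSet("example").
+//		WithSpec(ImageDigestMirrorSetSpec()).
+//		WithStatus(configv1.ImageDigestMirrorSetStatus{})
+
+// GetName retrieves the value of the Name field in the declarative configuration.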
+func (b *ImageDigestMirrorSetApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go
new file mode 100644
index 0000000000000..fbb9d48ca3530
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageDigestMirrorSetSpecApplyConfiguration represents a declarative configuration of the ImageDigestMirrorSetSpec type for use
+// with apply.
+type ImageDigestMirrorSetSpecApplyConfiguration struct {
+	ImageDigestMirrors []ImageDigestMirrorsApplyConfiguration `json:"imageDigestMirrors,omitempty"`
+}
+
+// ImageDigestMirrorSetSpecApplyConfiguration constructs a declarative configuration of the ImageDigestMirrorSetSpec type for use with
+// apply.
+func ImageDigestMirrorSetSpec() *ImageDigestMirrorSetSpecApplyConfiguration {
+	return &ImageDigestMirrorSetSpecApplyConfiguration{}
+}
+
+// WithImageDigestMirrors adds the given value to the ImageDigestMirrors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageDigestMirrors field.
+func (b *ImageDigestMirrorSetSpecApplyConfiguration) WithImageDigestMirrors(values ...*ImageDigestMirrorsApplyConfiguration) *ImageDigestMirrorSetSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithImageDigestMirrors")
+		}
+		b.ImageDigestMirrors = append(b.ImageDigestMirrors, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go
new file mode 100644
index 0000000000000..1d19105474467
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageLabelApplyConfiguration represents a declarative configuration of the ImageLabel type for use
+// with apply.
+type ImageLabelApplyConfiguration struct {
+	Name  *string `json:"name,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// ImageLabelApplyConfiguration constructs a declarative configuration of the ImageLabel type for use with
+// apply.
+func ImageLabel() *ImageLabelApplyConfiguration {
+	return &ImageLabelApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageLabelApplyConfiguration) WithName(value string) *ImageLabelApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *ImageLabelApplyConfiguration) WithValue(value string) *ImageLabelApplyConfiguration {
+	b.Value = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go
new file mode 100644
index 0000000000000..2c3bf26874200
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go
@@ -0,0 +1,70 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// ImageSpecApplyConfiguration represents a declarative configuration of the ImageSpec type for use
+// with apply.
+type ImageSpecApplyConfiguration struct {
+	AllowedRegistriesForImport []RegistryLocationApplyConfiguration      `json:"allowedRegistriesForImport,omitempty"`
+	ExternalRegistryHostnames  []string                                  `json:"externalRegistryHostnames,omitempty"`
+	AdditionalTrustedCA        *ConfigMapNameReferenceApplyConfiguration `json:"additionalTrustedCA,omitempty"`
+	RegistrySources            *RegistrySourcesApplyConfiguration        `json:"registrySources,omitempty"`
+	ImageStreamImportMode      *configv1.ImportModeType                  `json:"imageStreamImportMode,omitempty"`
+}
+
+// ImageSpecApplyConfiguration constructs a declarative configuration of the ImageSpec type for use with
+// apply.
+func ImageSpec() *ImageSpecApplyConfiguration {
+	return &ImageSpecApplyConfiguration{}
+}
+
+// WithAllowedRegistriesForImport adds the given value to the AllowedRegistriesForImport field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedRegistriesForImport field.
+func (b *ImageSpecApplyConfiguration) WithAllowedRegistriesForImport(values ...*RegistryLocationApplyConfiguration) *ImageSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithAllowedRegistriesForImport")
+		}
+		b.AllowedRegistriesForImport = append(b.AllowedRegistriesForImport, *values[i])
+	}
+	return b
+}
+
+// WithExternalRegistryHostnames adds the given value to the ExternalRegistryHostnames field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExternalRegistryHostnames field.
+func (b *ImageSpecApplyConfiguration) WithExternalRegistryHostnames(values ...string) *ImageSpecApplyConfiguration {
+	for i := range values {
+		b.ExternalRegistryHostnames = append(b.ExternalRegistryHostnames, values[i])
+	}
+	return b
+}
+
+// WithAdditionalTrustedCA sets the AdditionalTrustedCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdditionalTrustedCA field is set to the value of the last call.
+func (b *ImageSpecApplyConfiguration) WithAdditionalTrustedCA(value *ConfigMapNameReferenceApplyConfiguration) *ImageSpecApplyConfiguration {
+	b.AdditionalTrustedCA = value
+	return b
+}
+
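+// An illustrative ImageSpec sketch using only builders from this file; the
+// hostname is made up, and the import-mode value is written as a raw string
+// conversion rather than a named constant (see configv1 for the canonical
+// values):
+//
+//	spec := ImageSpec().
+//		WithExternalRegistryHostnames("registry.example.com").
+//		WithImageStreamImportMode(configv1.ImportModeType("PreserveOriginal"))
+
+// WithRegistrySources sets the RegistrySources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.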
+// If called multiple times, the RegistrySources field is set to the value of the last call.
+func (b *ImageSpecApplyConfiguration) WithRegistrySources(value *RegistrySourcesApplyConfiguration) *ImageSpecApplyConfiguration {
+	b.RegistrySources = value
+	return b
+}
+
+// WithImageStreamImportMode sets the ImageStreamImportMode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ImageStreamImportMode field is set to the value of the last call.
+func (b *ImageSpecApplyConfiguration) WithImageStreamImportMode(value configv1.ImportModeType) *ImageSpecApplyConfiguration {
+	b.ImageStreamImportMode = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go
new file mode 100644
index 0000000000000..cbf8a208a9515
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// ImageStatusApplyConfiguration represents a declarative configuration of the ImageStatus type for use
+// with apply.
+type ImageStatusApplyConfiguration struct {
+	InternalRegistryHostname  *string                  `json:"internalRegistryHostname,omitempty"`
+	ExternalRegistryHostnames []string                 `json:"externalRegistryHostnames,omitempty"`
+	ImageStreamImportMode     *configv1.ImportModeType `json:"imageStreamImportMode,omitempty"`
+}
+
+// ImageStatusApplyConfiguration constructs a declarative configuration of the ImageStatus type for use with
+// apply.
+func ImageStatus() *ImageStatusApplyConfiguration {
+	return &ImageStatusApplyConfiguration{}
+}
+
+// WithInternalRegistryHostname sets the InternalRegistryHostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the InternalRegistryHostname field is set to the value of the last call.
+func (b *ImageStatusApplyConfiguration) WithInternalRegistryHostname(value string) *ImageStatusApplyConfiguration {
+	b.InternalRegistryHostname = &value
+	return b
+}
+
+// WithExternalRegistryHostnames adds the given value to the ExternalRegistryHostnames field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExternalRegistryHostnames field.
+func (b *ImageStatusApplyConfiguration) WithExternalRegistryHostnames(values ...string) *ImageStatusApplyConfiguration {
+	for i := range values {
+		b.ExternalRegistryHostnames = append(b.ExternalRegistryHostnames, values[i])
+	}
+	return b
+}
+
+// WithImageStreamImportMode sets the ImageStreamImportMode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ImageStreamImportMode field is set to the value of the last call.
+func (b *ImageStatusApplyConfiguration) WithImageStreamImportMode(value configv1.ImportModeType) *ImageStatusApplyConfiguration {
+	b.ImageStreamImportMode = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go
new file mode 100644
index 0000000000000..e0baa99fc5611
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// ImageTagMirrorsApplyConfiguration represents a declarative configuration of the ImageTagMirrors type for use
+// with apply.
+type ImageTagMirrorsApplyConfiguration struct {
+	Source             *string                      `json:"source,omitempty"`
+	Mirrors            []configv1.ImageMirror       `json:"mirrors,omitempty"`
+	MirrorSourcePolicy *configv1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
+
+// ImageTagMirrorsApplyConfiguration constructs a declarative configuration of the ImageTagMirrors type for use with
+// apply.
+func ImageTagMirrors() *ImageTagMirrorsApplyConfiguration {
+	return &ImageTagMirrorsApplyConfiguration{}
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *ImageTagMirrorsApplyConfiguration) WithSource(value string) *ImageTagMirrorsApplyConfiguration {
+	b.Source = &value
+	return b
+}
+
+// WithMirrors adds the given value to the Mirrors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mirrors field.
+func (b *ImageTagMirrorsApplyConfiguration) WithMirrors(values ...configv1.ImageMirror) *ImageTagMirrorsApplyConfiguration {
+	for i := range values {
+		b.Mirrors = append(b.Mirrors, values[i])
+	}
+	return b
+}
+
+// WithMirrorSourcePolicy sets the MirrorSourcePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MirrorSourcePolicy field is set to the value of the last call.
+func (b *ImageTagMirrorsApplyConfiguration) WithMirrorSourcePolicy(value configv1.MirrorSourcePolicy) *ImageTagMirrorsApplyConfiguration {
+	b.MirrorSourcePolicy = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go
new file mode 100644
index 0000000000000..b8a9de19219ff
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageTagMirrorSetApplyConfiguration represents a declarative configuration of the ImageTagMirrorSet type for use
+// with apply.
+type ImageTagMirrorSetApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ImageTagMirrorSetSpecApplyConfiguration `json:"spec,omitempty"`
+	Status                               *configv1.ImageTagMirrorSetStatus        `json:"status,omitempty"`
+}
+
+// ImageTagMirrorSet constructs a declarative configuration of the ImageTagMirrorSet type for use with
+// apply.
+func ImageTagMirrorSet(name string) *ImageTagMirrorSetApplyConfiguration {
+	b := &ImageTagMirrorSetApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ImageTagMirrorSet")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractImageTagMirrorSet extracts the applied configuration owned by fieldManager from
+// imageTagMirrorSet. If no managedFields are found in imageTagMirrorSet for fieldManager, an
+// ImageTagMirrorSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// imageTagMirrorSet must be an unmodified ImageTagMirrorSet API object that was retrieved from the Kubernetes API.
+// ExtractImageTagMirrorSet provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageTagMirrorSet(imageTagMirrorSet *configv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) {
+	return extractImageTagMirrorSet(imageTagMirrorSet, fieldManager, "")
+}
+
+// ExtractImageTagMirrorSetStatus is the same as ExtractImageTagMirrorSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageTagMirrorSetStatus(imageTagMirrorSet *configv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) {
+	return extractImageTagMirrorSet(imageTagMirrorSet, fieldManager, "status")
+}
+
+func extractImageTagMirrorSet(imageTagMirrorSet *configv1.ImageTagMirrorSet, fieldManager string, subresource string) (*ImageTagMirrorSetApplyConfiguration, error) {
+	b := &ImageTagMirrorSetApplyConfiguration{}
+	err := managedfields.ExtractInto(imageTagMirrorSet, internal.Parser().Type("com.github.openshift.api.config.v1.ImageTagMirrorSet"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(imageTagMirrorSet.Name)
+
+	b.WithKind("ImageTagMirrorSet")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
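+// A status-focused sketch of the same extract pattern; configClient, ctx,
+// the resource name, the field-manager string, and the ApplyStatus call are
+// assumptions for the example, not part of this package:
+//
+//	cur, _ := configClient.ConfigV1().ImageTagMirrorSets().Get(ctx, "example", metav1.GetOptions{})
+//	ac, _ := ExtractImageTagMirrorSetStatus(cur, "my-manager")
+//	_, _ = configClient.ConfigV1().ImageTagMirrorSets().ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "my-manager"})
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.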
+// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithKind(value string) *ImageTagMirrorSetApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithAPIVersion(value string) *ImageTagMirrorSetApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithName(value string) *ImageTagMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithGenerateName(value string) *ImageTagMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithNamespace(value string) *ImageTagMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageTagMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageTagMirrorSetApplyConfiguration) WithResourceVersion(value string) *ImageTagMirrorSetApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithGeneration(value int64) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ImageTagMirrorSetApplyConfiguration) WithLabels(entries map[string]string) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageTagMirrorSetApplyConfiguration) WithAnnotations(entries map[string]string) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageTagMirrorSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageTagMirrorSetApplyConfiguration) WithFinalizers(values ...string) *ImageTagMirrorSetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ImageTagMirrorSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithSpec(value *ImageTagMirrorSetSpecApplyConfiguration) *ImageTagMirrorSetApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value configv1.ImageTagMirrorSetStatus) *ImageTagMirrorSetApplyConfiguration {
+	b.Status = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ImageTagMirrorSetApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go
new file mode 100644
index 0000000000000..ca59c387148dd
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageTagMirrorSetSpecApplyConfiguration represents a declarative configuration of the ImageTagMirrorSetSpec type for use
+// with apply.
+type ImageTagMirrorSetSpecApplyConfiguration struct {
+	ImageTagMirrors []ImageTagMirrorsApplyConfiguration `json:"imageTagMirrors,omitempty"`
+}
+
+// ImageTagMirrorSetSpecApplyConfiguration constructs a declarative configuration of the ImageTagMirrorSetSpec type for use with
+// apply.
+func ImageTagMirrorSetSpec() *ImageTagMirrorSetSpecApplyConfiguration {
+	return &ImageTagMirrorSetSpecApplyConfiguration{}
+}
+
+// WithImageTagMirrors adds the given value to the ImageTagMirrors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageTagMirrors field.
+func (b *ImageTagMirrorSetSpecApplyConfiguration) WithImageTagMirrors(values ...*ImageTagMirrorsApplyConfiguration) *ImageTagMirrorSetSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithImageTagMirrors")
+		}
+		b.ImageTagMirrors = append(b.ImageTagMirrors, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go
new file mode 100644
index 0000000000000..970391cfade2c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// InfrastructureApplyConfiguration represents a declarative configuration of the Infrastructure type for use
+// with apply.
+type InfrastructureApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *InfrastructureSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *InfrastructureStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Infrastructure constructs a declarative configuration of the Infrastructure type for use with
+// apply.
+func Infrastructure(name string) *InfrastructureApplyConfiguration {
+	b := &InfrastructureApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Infrastructure")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractInfrastructure extracts the applied configuration owned by fieldManager from
+// infrastructure. If no managedFields are found in infrastructure for fieldManager, an
+// InfrastructureApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// infrastructure must be an unmodified Infrastructure API object that was retrieved from the Kubernetes API.
+// ExtractInfrastructure provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractInfrastructure(infrastructure *configv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) {
+	return extractInfrastructure(infrastructure, fieldManager, "")
+}
+
+// ExtractInfrastructureStatus is the same as ExtractInfrastructure except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractInfrastructureStatus(infrastructure *configv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) {
+	return extractInfrastructure(infrastructure, fieldManager, "status")
+}
+
+func extractInfrastructure(infrastructure *configv1.Infrastructure, fieldManager string, subresource string) (*InfrastructureApplyConfiguration, error) {
+	b := &InfrastructureApplyConfiguration{}
+	err := managedfields.ExtractInto(infrastructure, internal.Parser().Type("com.github.openshift.api.config.v1.Infrastructure"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(infrastructure.Name)
+
+	b.WithKind("Infrastructure")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithKind(value string) *InfrastructureApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithAPIVersion(value string) *InfrastructureApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
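+// Infrastructure is a cluster-scoped resource that, by convention, exists as
+// a singleton named "cluster". An illustrative sketch using only builders
+// from this package (the spec is left empty purely for the example):
+//
+//	infra := Infrastructure("cluster").
+//		WithSpec(InfrastructureSpec())
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.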
+func (b *InfrastructureApplyConfiguration) WithName(value string) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithGenerateName(value string) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithNamespace(value string) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithUID(value types.UID) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithResourceVersion(value string) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithGeneration(value int64) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *InfrastructureApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InfrastructureApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *InfrastructureApplyConfiguration) WithLabels(entries map[string]string) *InfrastructureApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *InfrastructureApplyConfiguration) WithAnnotations(entries map[string]string) *InfrastructureApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *InfrastructureApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *InfrastructureApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *InfrastructureApplyConfiguration) WithFinalizers(values ...string) *InfrastructureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InfrastructureApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithSpec(value *InfrastructureSpecApplyConfiguration) *InfrastructureApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *InfrastructureApplyConfiguration) WithStatus(value *InfrastructureStatusApplyConfiguration) *InfrastructureApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InfrastructureApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go new file mode 100644 index 0000000000000..83dccde29ee7e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// InfrastructureSpecApplyConfiguration represents a declarative configuration of the InfrastructureSpec type for use +// with apply. +type InfrastructureSpecApplyConfiguration struct { + CloudConfig *ConfigMapFileReferenceApplyConfiguration `json:"cloudConfig,omitempty"` + PlatformSpec *PlatformSpecApplyConfiguration `json:"platformSpec,omitempty"` +} + +// InfrastructureSpecApplyConfiguration constructs a declarative configuration of the InfrastructureSpec type for use with +// apply. +func InfrastructureSpec() *InfrastructureSpecApplyConfiguration { + return &InfrastructureSpecApplyConfiguration{} +} + +// WithCloudConfig sets the CloudConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudConfig field is set to the value of the last call. +func (b *InfrastructureSpecApplyConfiguration) WithCloudConfig(value *ConfigMapFileReferenceApplyConfiguration) *InfrastructureSpecApplyConfiguration { + b.CloudConfig = value + return b +} + +// WithPlatformSpec sets the PlatformSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the PlatformSpec field is set to the value of the last call. +func (b *InfrastructureSpecApplyConfiguration) WithPlatformSpec(value *PlatformSpecApplyConfiguration) *InfrastructureSpecApplyConfiguration { + b.PlatformSpec = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go new file mode 100644 index 0000000000000..5b5d8288c2720 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go @@ -0,0 +1,99 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// InfrastructureStatusApplyConfiguration represents a declarative configuration of the InfrastructureStatus type for use +// with apply. +type InfrastructureStatusApplyConfiguration struct { + InfrastructureName *string `json:"infrastructureName,omitempty"` + Platform *configv1.PlatformType `json:"platform,omitempty"` + PlatformStatus *PlatformStatusApplyConfiguration `json:"platformStatus,omitempty"` + EtcdDiscoveryDomain *string `json:"etcdDiscoveryDomain,omitempty"` + APIServerURL *string `json:"apiServerURL,omitempty"` + APIServerInternalURL *string `json:"apiServerInternalURI,omitempty"` + ControlPlaneTopology *configv1.TopologyMode `json:"controlPlaneTopology,omitempty"` + InfrastructureTopology *configv1.TopologyMode `json:"infrastructureTopology,omitempty"` + CPUPartitioning *configv1.CPUPartitioningMode `json:"cpuPartitioning,omitempty"` +} + +// InfrastructureStatusApplyConfiguration constructs a declarative configuration of the InfrastructureStatus type for use with +// apply. +func InfrastructureStatus() *InfrastructureStatusApplyConfiguration { + return &InfrastructureStatusApplyConfiguration{} +} + +// WithInfrastructureName sets the InfrastructureName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InfrastructureName field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureName(value string) *InfrastructureStatusApplyConfiguration { + b.InfrastructureName = &value + return b +} + +// WithPlatform sets the Platform field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Platform field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithPlatform(value configv1.PlatformType) *InfrastructureStatusApplyConfiguration { + b.Platform = &value + return b +} + +// WithPlatformStatus sets the PlatformStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PlatformStatus field is set to the value of the last call. 
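+//
+// For illustration (a sketch, not generated code; PlatformStatus and its
+// WithType setter are assumed to be sibling builders in this package):
+//
+//	st := InfrastructureStatus().
+//		WithPlatform(configv1.AWSPlatformType).
+//		WithPlatformStatus(PlatformStatus().WithType(configv1.AWSPlatformType))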
+func (b *InfrastructureStatusApplyConfiguration) WithPlatformStatus(value *PlatformStatusApplyConfiguration) *InfrastructureStatusApplyConfiguration { + b.PlatformStatus = value + return b +} + +// WithEtcdDiscoveryDomain sets the EtcdDiscoveryDomain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EtcdDiscoveryDomain field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithEtcdDiscoveryDomain(value string) *InfrastructureStatusApplyConfiguration { + b.EtcdDiscoveryDomain = &value + return b +} + +// WithAPIServerURL sets the APIServerURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerURL field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithAPIServerURL(value string) *InfrastructureStatusApplyConfiguration { + b.APIServerURL = &value + return b +} + +// WithAPIServerInternalURL sets the APIServerInternalURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerInternalURL field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithAPIServerInternalURL(value string) *InfrastructureStatusApplyConfiguration { + b.APIServerInternalURL = &value + return b +} + +// WithControlPlaneTopology sets the ControlPlaneTopology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ControlPlaneTopology field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithControlPlaneTopology(value configv1.TopologyMode) *InfrastructureStatusApplyConfiguration { + b.ControlPlaneTopology = &value + return b +} + +// WithInfrastructureTopology sets the InfrastructureTopology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InfrastructureTopology field is set to the value of the last call. +func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureTopology(value configv1.TopologyMode) *InfrastructureStatusApplyConfiguration { + b.InfrastructureTopology = &value + return b +} + +// WithCPUPartitioning sets the CPUPartitioning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CPUPartitioning field is set to the value of the last call. 
+func (b *InfrastructureStatusApplyConfiguration) WithCPUPartitioning(value configv1.CPUPartitioningMode) *InfrastructureStatusApplyConfiguration {
+	b.CPUPartitioning = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go
new file mode 100644
index 0000000000000..945bacf8a80aa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
+// with apply.
+type IngressApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *IngressSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *IngressStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Ingress constructs a declarative configuration of the Ingress type for use with
+// apply.
+func Ingress(name string) *IngressApplyConfiguration {
+	b := &IngressApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Ingress")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractIngress extracts the applied configuration owned by fieldManager from
+// ingress. If no managedFields are found in ingress for fieldManager, an
+// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// ingress must be an unmodified Ingress API object that was retrieved from the Kubernetes API.
+// ExtractIngress provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractIngress(ingress *configv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+	return extractIngress(ingress, fieldManager, "")
+}
+
+// ExtractIngressStatus is the same as ExtractIngress except
+// that it extracts the status subresource applied configuration.
+// Experimental!
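+//
+// A minimal sketch of the extract/modify-in-place/apply workflow described
+// above (illustrative only: the client, ctx, and "my-operator" field manager
+// are assumptions, not part of this package):
+//
+//	ing, err := client.ConfigV1().Ingresses().Get(ctx, "cluster", apismetav1.GetOptions{})
+//	// handle err
+//	ac, err := ExtractIngress(ing, "my-operator")
+//	// handle err
+//	ac.WithSpec(IngressSpec().WithAppsDomain("apps.example.com"))
+//	_, err = client.ConfigV1().Ingresses().Apply(ctx, ac, apismetav1.ApplyOptions{FieldManager: "my-operator", Force: true})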
+func ExtractIngressStatus(ingress *configv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { + return extractIngress(ingress, fieldManager, "status") +} + +func extractIngress(ingress *configv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { + b := &IngressApplyConfiguration{} + err := managedfields.ExtractInto(ingress, internal.Parser().Type("com.github.openshift.api.config.v1.Ingress"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(ingress.Name) + + b.WithKind("Ingress") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
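+//
+// For example (illustrative keys and values), later calls merge into earlier
+// ones, with the last write winning per key:
+//
+//	b.WithLabels(map[string]string{"a": "1", "b": "1"}).
+//		WithLabels(map[string]string{"b": "2"}) // Labels: a=1, b=2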
+func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithSpec(value *IngressSpecApplyConfiguration) *IngressApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfiguration) *IngressApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go new file mode 100644 index 0000000000000..ed5c265315ba2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// IngressPlatformSpecApplyConfiguration represents a declarative configuration of the IngressPlatformSpec type for use +// with apply. +type IngressPlatformSpecApplyConfiguration struct { + Type *configv1.PlatformType `json:"type,omitempty"` + AWS *AWSIngressSpecApplyConfiguration `json:"aws,omitempty"` +} + +// IngressPlatformSpecApplyConfiguration constructs a declarative configuration of the IngressPlatformSpec type for use with +// apply. +func IngressPlatformSpec() *IngressPlatformSpecApplyConfiguration { + return &IngressPlatformSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *IngressPlatformSpecApplyConfiguration) WithType(value configv1.PlatformType) *IngressPlatformSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *IngressPlatformSpecApplyConfiguration) WithAWS(value *AWSIngressSpecApplyConfiguration) *IngressPlatformSpecApplyConfiguration { + b.AWS = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go new file mode 100644 index 0000000000000..a9b09512caa5f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go @@ -0,0 +1,69 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use +// with apply. +type IngressSpecApplyConfiguration struct { + Domain *string `json:"domain,omitempty"` + AppsDomain *string `json:"appsDomain,omitempty"` + ComponentRoutes []ComponentRouteSpecApplyConfiguration `json:"componentRoutes,omitempty"` + RequiredHSTSPolicies []RequiredHSTSPolicyApplyConfiguration `json:"requiredHSTSPolicies,omitempty"` + LoadBalancer *LoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` +} + +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with +// apply. 
+func IngressSpec() *IngressSpecApplyConfiguration {
+	return &IngressSpecApplyConfiguration{}
+}
+
+// WithDomain sets the Domain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Domain field is set to the value of the last call.
+func (b *IngressSpecApplyConfiguration) WithDomain(value string) *IngressSpecApplyConfiguration {
+	b.Domain = &value
+	return b
+}
+
+// WithAppsDomain sets the AppsDomain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AppsDomain field is set to the value of the last call.
+func (b *IngressSpecApplyConfiguration) WithAppsDomain(value string) *IngressSpecApplyConfiguration {
+	b.AppsDomain = &value
+	return b
+}
+
+// WithComponentRoutes adds the given value to the ComponentRoutes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ComponentRoutes field.
+func (b *IngressSpecApplyConfiguration) WithComponentRoutes(values ...*ComponentRouteSpecApplyConfiguration) *IngressSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithComponentRoutes")
+		}
+		b.ComponentRoutes = append(b.ComponentRoutes, *values[i])
+	}
+	return b
+}
+
+// WithRequiredHSTSPolicies adds the given value to the RequiredHSTSPolicies field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RequiredHSTSPolicies field.
+func (b *IngressSpecApplyConfiguration) WithRequiredHSTSPolicies(values ...*RequiredHSTSPolicyApplyConfiguration) *IngressSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithRequiredHSTSPolicies")
+		}
+		b.RequiredHSTSPolicies = append(b.RequiredHSTSPolicies, *values[i])
+	}
+	return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *IngressSpecApplyConfiguration) WithLoadBalancer(value *LoadBalancerApplyConfiguration) *IngressSpecApplyConfiguration {
+	b.LoadBalancer = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go
new file mode 100644
index 0000000000000..792bcd75519bc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
+// with apply.
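+//
+// A small sketch (illustrative value; ComponentRouteStatus and its WithName
+// setter are assumed to be sibling builders in this package):
+//
+//	st := IngressStatus().
+//		WithComponentRoutes(ComponentRouteStatus().WithName("console"))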
+type IngressStatusApplyConfiguration struct {
+	ComponentRoutes  []ComponentRouteStatusApplyConfiguration `json:"componentRoutes,omitempty"`
+	DefaultPlacement *configv1.DefaultPlacement               `json:"defaultPlacement,omitempty"`
+}
+
+// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with
+// apply.
+func IngressStatus() *IngressStatusApplyConfiguration {
+	return &IngressStatusApplyConfiguration{}
+}
+
+// WithComponentRoutes adds the given value to the ComponentRoutes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ComponentRoutes field.
+func (b *IngressStatusApplyConfiguration) WithComponentRoutes(values ...*ComponentRouteStatusApplyConfiguration) *IngressStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithComponentRoutes")
+		}
+		b.ComponentRoutes = append(b.ComponentRoutes, *values[i])
+	}
+	return b
+}
+
+// WithDefaultPlacement sets the DefaultPlacement field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DefaultPlacement field is set to the value of the last call.
+func (b *IngressStatusApplyConfiguration) WithDefaultPlacement(value configv1.DefaultPlacement) *IngressStatusApplyConfiguration {
+	b.DefaultPlacement = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go
new file mode 100644
index 0000000000000..abbb9ef152be1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go
@@ -0,0 +1,56 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// KeystoneIdentityProviderApplyConfiguration represents a declarative configuration of the KeystoneIdentityProvider type for use
+// with apply.
+type KeystoneIdentityProviderApplyConfiguration struct {
+	OAuthRemoteConnectionInfoApplyConfiguration `json:",inline"`
+	DomainName                                  *string `json:"domainName,omitempty"`
+}
+
+// KeystoneIdentityProviderApplyConfiguration constructs a declarative configuration of the KeystoneIdentityProvider type for use with
+// apply.
+func KeystoneIdentityProvider() *KeystoneIdentityProviderApplyConfiguration {
+	return &KeystoneIdentityProviderApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *KeystoneIdentityProviderApplyConfiguration) WithURL(value string) *KeystoneIdentityProviderApplyConfiguration {
+	b.OAuthRemoteConnectionInfoApplyConfiguration.URL = &value
+	return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
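+//
+// A usage sketch (the URL, ConfigMap name, and domain below are illustrative;
+// ConfigMapNameReference and its WithName setter are assumed to be the sibling
+// builders referenced in the field types):
+//
+//	ks := KeystoneIdentityProvider().
+//		WithURL("https://keystone.example.com:5000").
+//		WithCA(ConfigMapNameReference().WithName("keystone-ca")).
+//		WithDomainName("Default")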
+func (b *KeystoneIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration { + b.OAuthRemoteConnectionInfoApplyConfiguration.CA = value + return b +} + +// WithTLSClientCert sets the TLSClientCert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSClientCert field is set to the value of the last call. +func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration { + b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientCert = value + return b +} + +// WithTLSClientKey sets the TLSClientKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSClientKey field is set to the value of the last call. +func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration { + b.OAuthRemoteConnectionInfoApplyConfiguration.TLSClientKey = value + return b +} + +// WithDomainName sets the DomainName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DomainName field is set to the value of the last call. +func (b *KeystoneIdentityProviderApplyConfiguration) WithDomainName(value string) *KeystoneIdentityProviderApplyConfiguration { + b.DomainName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go new file mode 100644 index 0000000000000..3d136c53b2c01 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// KubevirtPlatformStatusApplyConfiguration represents a declarative configuration of the KubevirtPlatformStatus type for use +// with apply. +type KubevirtPlatformStatusApplyConfiguration struct { + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` +} + +// KubevirtPlatformStatusApplyConfiguration constructs a declarative configuration of the KubevirtPlatformStatus type for use with +// apply. +func KubevirtPlatformStatus() *KubevirtPlatformStatusApplyConfiguration { + return &KubevirtPlatformStatusApplyConfiguration{} +} + +// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerInternalIP field is set to the value of the last call. +func (b *KubevirtPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *KubevirtPlatformStatusApplyConfiguration { + b.APIServerInternalIP = &value + return b +} + +// WithIngressIP sets the IngressIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *KubevirtPlatformStatusApplyConfiguration) WithIngressIP(value string) *KubevirtPlatformStatusApplyConfiguration {
+	b.IngressIP = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go
new file mode 100644
index 0000000000000..b618065cea58c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go
@@ -0,0 +1,58 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// LDAPAttributeMappingApplyConfiguration represents a declarative configuration of the LDAPAttributeMapping type for use
+// with apply.
+type LDAPAttributeMappingApplyConfiguration struct {
+	ID                []string `json:"id,omitempty"`
+	PreferredUsername []string `json:"preferredUsername,omitempty"`
+	Name              []string `json:"name,omitempty"`
+	Email             []string `json:"email,omitempty"`
+}
+
+// LDAPAttributeMappingApplyConfiguration constructs a declarative configuration of the LDAPAttributeMapping type for use with
+// apply.
+func LDAPAttributeMapping() *LDAPAttributeMappingApplyConfiguration {
+	return &LDAPAttributeMappingApplyConfiguration{}
+}
+
+// WithID adds the given value to the ID field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ID field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithID(values ...string) *LDAPAttributeMappingApplyConfiguration {
+	for i := range values {
+		b.ID = append(b.ID, values[i])
+	}
+	return b
+}
+
+// WithPreferredUsername adds the given value to the PreferredUsername field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PreferredUsername field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithPreferredUsername(values ...string) *LDAPAttributeMappingApplyConfiguration {
+	for i := range values {
+		b.PreferredUsername = append(b.PreferredUsername, values[i])
+	}
+	return b
+}
+
+// WithName adds the given value to the Name field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Name field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithName(values ...string) *LDAPAttributeMappingApplyConfiguration {
+	for i := range values {
+		b.Name = append(b.Name, values[i])
+	}
+	return b
+}
+
+// WithEmail adds the given value to the Email field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Email field.
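+//
+// For example (illustrative attribute names), the variadic setters append:
+//
+//	attrs := LDAPAttributeMapping().
+//		WithID("dn").
+//		WithPreferredUsername("uid").
+//		WithEmail("mail")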
+func (b *LDAPAttributeMappingApplyConfiguration) WithEmail(values ...string) *LDAPAttributeMappingApplyConfiguration { + for i := range values { + b.Email = append(b.Email, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go new file mode 100644 index 0000000000000..90bdfe34c21e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LDAPIdentityProviderApplyConfiguration represents a declarative configuration of the LDAPIdentityProvider type for use +// with apply. +type LDAPIdentityProviderApplyConfiguration struct { + URL *string `json:"url,omitempty"` + BindDN *string `json:"bindDN,omitempty"` + BindPassword *SecretNameReferenceApplyConfiguration `json:"bindPassword,omitempty"` + Insecure *bool `json:"insecure,omitempty"` + CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` + Attributes *LDAPAttributeMappingApplyConfiguration `json:"attributes,omitempty"` +} + +// LDAPIdentityProviderApplyConfiguration constructs a declarative configuration of the LDAPIdentityProvider type for use with +// apply. +func LDAPIdentityProvider() *LDAPIdentityProviderApplyConfiguration { + return &LDAPIdentityProviderApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *LDAPIdentityProviderApplyConfiguration) WithURL(value string) *LDAPIdentityProviderApplyConfiguration { + b.URL = &value + return b +} + +// WithBindDN sets the BindDN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BindDN field is set to the value of the last call. +func (b *LDAPIdentityProviderApplyConfiguration) WithBindDN(value string) *LDAPIdentityProviderApplyConfiguration { + b.BindDN = &value + return b +} + +// WithBindPassword sets the BindPassword field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BindPassword field is set to the value of the last call. +func (b *LDAPIdentityProviderApplyConfiguration) WithBindPassword(value *SecretNameReferenceApplyConfiguration) *LDAPIdentityProviderApplyConfiguration { + b.BindPassword = value + return b +} + +// WithInsecure sets the Insecure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Insecure field is set to the value of the last call. +func (b *LDAPIdentityProviderApplyConfiguration) WithInsecure(value bool) *LDAPIdentityProviderApplyConfiguration { + b.Insecure = &value + return b +} + +// WithCA sets the CA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CA field is set to the value of the last call. 
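+//
+// A composition sketch (the URL, bind DN, and secret/config map names are
+// illustrative; SecretNameReference and ConfigMapNameReference are assumed to
+// be the sibling builders referenced in the field types):
+//
+//	ldap := LDAPIdentityProvider().
+//		WithURL("ldaps://ldap.example.com/ou=users,dc=example,dc=com?uid").
+//		WithBindDN("cn=reader,dc=example,dc=com").
+//		WithBindPassword(SecretNameReference().WithName("ldap-bind-password")).
+//		WithCA(ConfigMapNameReference().WithName("ldap-ca"))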
+func (b *LDAPIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *LDAPIdentityProviderApplyConfiguration { + b.CA = value + return b +} + +// WithAttributes sets the Attributes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Attributes field is set to the value of the last call. +func (b *LDAPIdentityProviderApplyConfiguration) WithAttributes(value *LDAPAttributeMappingApplyConfiguration) *LDAPIdentityProviderApplyConfiguration { + b.Attributes = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go new file mode 100644 index 0000000000000..0dfc67c8f36fb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LoadBalancerApplyConfiguration represents a declarative configuration of the LoadBalancer type for use +// with apply. +type LoadBalancerApplyConfiguration struct { + Platform *IngressPlatformSpecApplyConfiguration `json:"platform,omitempty"` +} + +// LoadBalancerApplyConfiguration constructs a declarative configuration of the LoadBalancer type for use with +// apply. +func LoadBalancer() *LoadBalancerApplyConfiguration { + return &LoadBalancerApplyConfiguration{} +} + +// WithPlatform sets the Platform field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Platform field is set to the value of the last call. +func (b *LoadBalancerApplyConfiguration) WithPlatform(value *IngressPlatformSpecApplyConfiguration) *LoadBalancerApplyConfiguration { + b.Platform = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go new file mode 100644 index 0000000000000..faa8e1dd565b6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MaxAgePolicyApplyConfiguration represents a declarative configuration of the MaxAgePolicy type for use +// with apply. +type MaxAgePolicyApplyConfiguration struct { + LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` + SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` +} + +// MaxAgePolicyApplyConfiguration constructs a declarative configuration of the MaxAgePolicy type for use with +// apply. +func MaxAgePolicy() *MaxAgePolicyApplyConfiguration { + return &MaxAgePolicyApplyConfiguration{} +} + +// WithLargestMaxAge sets the LargestMaxAge field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LargestMaxAge field is set to the value of the last call. 
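+//
+// For example (illustrative bounds, in seconds, as used by HSTS max-age):
+//
+//	p := MaxAgePolicy().
+//		WithLargestMaxAge(31536000). // one year
+//		WithSmallestMaxAge(1)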
+func (b *MaxAgePolicyApplyConfiguration) WithLargestMaxAge(value int32) *MaxAgePolicyApplyConfiguration { + b.LargestMaxAge = &value + return b +} + +// WithSmallestMaxAge sets the SmallestMaxAge field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SmallestMaxAge field is set to the value of the last call. +func (b *MaxAgePolicyApplyConfiguration) WithSmallestMaxAge(value int32) *MaxAgePolicyApplyConfiguration { + b.SmallestMaxAge = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go new file mode 100644 index 0000000000000..9db99100ee847 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MTUMigrationApplyConfiguration represents a declarative configuration of the MTUMigration type for use +// with apply. +type MTUMigrationApplyConfiguration struct { + Network *MTUMigrationValuesApplyConfiguration `json:"network,omitempty"` + Machine *MTUMigrationValuesApplyConfiguration `json:"machine,omitempty"` +} + +// MTUMigrationApplyConfiguration constructs a declarative configuration of the MTUMigration type for use with +// apply. +func MTUMigration() *MTUMigrationApplyConfiguration { + return &MTUMigrationApplyConfiguration{} +} + +// WithNetwork sets the Network field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Network field is set to the value of the last call. +func (b *MTUMigrationApplyConfiguration) WithNetwork(value *MTUMigrationValuesApplyConfiguration) *MTUMigrationApplyConfiguration { + b.Network = value + return b +} + +// WithMachine sets the Machine field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Machine field is set to the value of the last call. +func (b *MTUMigrationApplyConfiguration) WithMachine(value *MTUMigrationValuesApplyConfiguration) *MTUMigrationApplyConfiguration { + b.Machine = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go new file mode 100644 index 0000000000000..8d346f25f4967 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MTUMigrationValuesApplyConfiguration represents a declarative configuration of the MTUMigrationValues type for use +// with apply. +type MTUMigrationValuesApplyConfiguration struct { + To *uint32 `json:"to,omitempty"` + From *uint32 `json:"from,omitempty"` +} + +// MTUMigrationValuesApplyConfiguration constructs a declarative configuration of the MTUMigrationValues type for use with +// apply. 
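+//
+// A sketch of composing the migration values with the MTUMigration builder
+// above (the MTU numbers are illustrative):
+//
+//	m := MTUMigration().
+//		WithNetwork(MTUMigrationValues().WithTo(9000).WithFrom(1500))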
+func MTUMigrationValues() *MTUMigrationValuesApplyConfiguration {
+	return &MTUMigrationValuesApplyConfiguration{}
+}
+
+// WithTo sets the To field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the To field is set to the value of the last call.
+func (b *MTUMigrationValuesApplyConfiguration) WithTo(value uint32) *MTUMigrationValuesApplyConfiguration {
+	b.To = &value
+	return b
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *MTUMigrationValuesApplyConfiguration) WithFrom(value uint32) *MTUMigrationValuesApplyConfiguration {
+	b.From = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go
new file mode 100644
index 0000000000000..195594eadd804
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NetworkApplyConfiguration represents a declarative configuration of the Network type for use
+// with apply.
+type NetworkApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *NetworkSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *NetworkStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Network constructs a declarative configuration of the Network type for use with
+// apply.
+func Network(name string) *NetworkApplyConfiguration {
+	b := &NetworkApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Network")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractNetwork extracts the applied configuration owned by fieldManager from
+// network. If no managedFields are found in network for fieldManager, a
+// NetworkApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// network must be an unmodified Network API object that was retrieved from the Kubernetes API.
+// ExtractNetwork provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
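+//
+// A short sketch of extracting only the status-owned fields (the client, ctx,
+// and "network-operator" field manager are assumptions, not part of this
+// package):
+//
+//	n, err := client.ConfigV1().Networks().Get(ctx, "cluster", apismetav1.GetOptions{})
+//	// handle err
+//	ac, err := ExtractNetworkStatus(n, "network-operator")
+//	// handle err; ac now holds only the status fields owned by "network-operator"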
+func ExtractNetwork(network *configv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { + return extractNetwork(network, fieldManager, "") +} + +// ExtractNetworkStatus is the same as ExtractNetwork except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractNetworkStatus(network *configv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { + return extractNetwork(network, fieldManager, "status") +} + +func extractNetwork(network *configv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) { + b := &NetworkApplyConfiguration{} + err := managedfields.ExtractInto(network, internal.Parser().Type("com.github.openshift.api.config.v1.Network"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(network.Name) + + b.WithKind("Network") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithGeneration(value int64) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
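The Extract* helpers above describe an extract/modify-in-place/apply round trip. The following is a minimal sketch of that workflow, assuming the versioned OpenShift config clientset (github.com/openshift/client-go/config/clientset/versioned) exposes the generated Apply method; the "example-operator" field-manager name and the network type value are illustrative:

package networkexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1apply "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
)

// applyNetworkType re-applies only the fields this manager owns on the
// cluster-scoped "cluster" Network, changing spec.networkType in place.
func applyNetworkType(ctx context.Context, cs configclient.Interface) error {
	const fieldManager = "example-operator" // illustrative manager name

	current, err := cs.ConfigV1().Networks().Get(ctx, "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Extract the configuration previously applied by this field manager.
	cfg, err := configv1apply.ExtractNetwork(current, fieldManager)
	if err != nil {
		return err
	}

	// Modify in place, then re-apply; fields owned by other managers are
	// left untouched by server-side apply.
	cfg.WithSpec(configv1apply.NetworkSpec().WithNetworkType("OVNKubernetes"))

	_, err = cs.ConfigV1().Networks().Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}
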
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *NetworkApplyConfiguration) WithFinalizers(values ...string) *NetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *NetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithSpec(value *NetworkSpecApplyConfiguration) *NetworkApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfiguration) *NetworkApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go new file mode 100644 index 0000000000000..a2624dc5bccb8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// NetworkDiagnosticsApplyConfiguration represents a declarative configuration of the NetworkDiagnostics type for use +// with apply. +type NetworkDiagnosticsApplyConfiguration struct { + Mode *configv1.NetworkDiagnosticsMode `json:"mode,omitempty"` + SourcePlacement *NetworkDiagnosticsSourcePlacementApplyConfiguration `json:"sourcePlacement,omitempty"` + TargetPlacement *NetworkDiagnosticsTargetPlacementApplyConfiguration `json:"targetPlacement,omitempty"` +} + +// NetworkDiagnosticsApplyConfiguration constructs a declarative configuration of the NetworkDiagnostics type for use with +// apply. +func NetworkDiagnostics() *NetworkDiagnosticsApplyConfiguration { + return &NetworkDiagnosticsApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithMode(value configv1.NetworkDiagnosticsMode) *NetworkDiagnosticsApplyConfiguration { + b.Mode = &value + return b +} + +// WithSourcePlacement sets the SourcePlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourcePlacement field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithSourcePlacement(value *NetworkDiagnosticsSourcePlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration { + b.SourcePlacement = value + return b +} + +// WithTargetPlacement sets the TargetPlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPlacement field is set to the value of the last call. 
+func (b *NetworkDiagnosticsApplyConfiguration) WithTargetPlacement(value *NetworkDiagnosticsTargetPlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration {
+	b.TargetPlacement = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go
new file mode 100644
index 0000000000000..a1960ba9fe7f7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go
@@ -0,0 +1,44 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// NetworkDiagnosticsSourcePlacementApplyConfiguration represents a declarative configuration of the NetworkDiagnosticsSourcePlacement type for use
+// with apply.
+type NetworkDiagnosticsSourcePlacementApplyConfiguration struct {
+	NodeSelector map[string]string   `json:"nodeSelector,omitempty"`
+	Tolerations  []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// NetworkDiagnosticsSourcePlacementApplyConfiguration constructs a declarative configuration of the NetworkDiagnosticsSourcePlacement type for use with
+// apply.
+func NetworkDiagnosticsSourcePlacement() *NetworkDiagnosticsSourcePlacementApplyConfiguration {
+	return &NetworkDiagnosticsSourcePlacementApplyConfiguration{}
+}
+
+// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field,
+// overwriting existing map entries in the NodeSelector field with the same key.
+func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsSourcePlacementApplyConfiguration {
+	if b.NodeSelector == nil && len(entries) > 0 {
+		b.NodeSelector = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.NodeSelector[k] = v
+	}
+	return b
+}
+
+// WithTolerations adds the given value to the Tolerations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tolerations field.
+func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *NetworkDiagnosticsSourcePlacementApplyConfiguration {
+	for i := range values {
+		b.Tolerations = append(b.Tolerations, values[i])
+	}
+	return b
+}
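These placement builders compose like the rest of the apply-configuration API: repeated WithNodeSelector calls merge map entries, while repeated WithTolerations calls append. A small sketch, in which the configv1.NetworkDiagnosticsAll mode constant is assumed and the infra label/taint values are illustrative:

package networkexample

import (
	corev1 "k8s.io/api/core/v1"

	configv1 "github.com/openshift/api/config/v1"
	configv1apply "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// networkDiagnosticsPlacement pins the diagnostics source pods to infra
// nodes and lets them tolerate the matching NoSchedule taint.
func networkDiagnosticsPlacement() *configv1apply.NetworkDiagnosticsApplyConfiguration {
	return configv1apply.NetworkDiagnostics().
		WithMode(configv1.NetworkDiagnosticsAll). // assumed constant for mode "All"
		WithSourcePlacement(
			configv1apply.NetworkDiagnosticsSourcePlacement().
				WithNodeSelector(map[string]string{"node-role.kubernetes.io/infra": ""}).
				WithTolerations(corev1.Toleration{
					Key:      "node-role.kubernetes.io/infra",
					Operator: corev1.TolerationOpExists,
					Effect:   corev1.TaintEffectNoSchedule,
				}))
}
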
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go
new file mode 100644
index 0000000000000..ba0dbab8a0f34
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go
@@ -0,0 +1,44 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// NetworkDiagnosticsTargetPlacementApplyConfiguration represents a declarative configuration of the NetworkDiagnosticsTargetPlacement type for use
+// with apply.
+type NetworkDiagnosticsTargetPlacementApplyConfiguration struct {
+	NodeSelector map[string]string   `json:"nodeSelector,omitempty"`
+	Tolerations  []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// NetworkDiagnosticsTargetPlacementApplyConfiguration constructs a declarative configuration of the NetworkDiagnosticsTargetPlacement type for use with
+// apply.
+func NetworkDiagnosticsTargetPlacement() *NetworkDiagnosticsTargetPlacementApplyConfiguration {
+	return &NetworkDiagnosticsTargetPlacementApplyConfiguration{}
+}
+
+// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field,
+// overwriting existing map entries in the NodeSelector field with the same key.
+func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsTargetPlacementApplyConfiguration {
+	if b.NodeSelector == nil && len(entries) > 0 {
+		b.NodeSelector = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.NodeSelector[k] = v
+	}
+	return b
+}
+
+// WithTolerations adds the given value to the Tolerations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tolerations field.
+func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *NetworkDiagnosticsTargetPlacementApplyConfiguration {
+	for i := range values {
+		b.Tolerations = append(b.Tolerations, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go
new file mode 100644
index 0000000000000..9c829474626d3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NetworkMigrationApplyConfiguration represents a declarative configuration of the NetworkMigration type for use
+// with apply.
+type NetworkMigrationApplyConfiguration struct {
+	NetworkType *string                         `json:"networkType,omitempty"`
+	MTU         *MTUMigrationApplyConfiguration `json:"mtu,omitempty"`
+}
+
+// NetworkMigrationApplyConfiguration constructs a declarative configuration of the NetworkMigration type for use with
+// apply.
+func NetworkMigration() *NetworkMigrationApplyConfiguration {
+	return &NetworkMigrationApplyConfiguration{}
+}
+
+// WithNetworkType sets the NetworkType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkType field is set to the value of the last call.
+func (b *NetworkMigrationApplyConfiguration) WithNetworkType(value string) *NetworkMigrationApplyConfiguration {
+	b.NetworkType = &value
+	return b
+}
+
+// WithMTU sets the MTU field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MTU field is set to the value of the last call.
+func (b *NetworkMigrationApplyConfiguration) WithMTU(value *MTUMigrationApplyConfiguration) *NetworkMigrationApplyConfiguration {
+	b.MTU = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go
new file mode 100644
index 0000000000000..d4e970e34f707
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go
@@ -0,0 +1,75 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NetworkSpecApplyConfiguration represents a declarative configuration of the NetworkSpec type for use
+// with apply.
+type NetworkSpecApplyConfiguration struct {
+	ClusterNetwork       []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"`
+	ServiceNetwork       []string                                `json:"serviceNetwork,omitempty"`
+	NetworkType          *string                                 `json:"networkType,omitempty"`
+	ExternalIP           *ExternalIPConfigApplyConfiguration     `json:"externalIP,omitempty"`
+	ServiceNodePortRange *string                                 `json:"serviceNodePortRange,omitempty"`
+	NetworkDiagnostics   *NetworkDiagnosticsApplyConfiguration   `json:"networkDiagnostics,omitempty"`
+}
+
+// NetworkSpecApplyConfiguration constructs a declarative configuration of the NetworkSpec type for use with
+// apply.
+func NetworkSpec() *NetworkSpecApplyConfiguration {
+	return &NetworkSpecApplyConfiguration{}
+}
+
+// WithClusterNetwork adds the given value to the ClusterNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClusterNetwork field.
+func (b *NetworkSpecApplyConfiguration) WithClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *NetworkSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithClusterNetwork")
+		}
+		b.ClusterNetwork = append(b.ClusterNetwork, *values[i])
+	}
+	return b
+}
+
+// WithServiceNetwork adds the given value to the ServiceNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceNetwork field.
+func (b *NetworkSpecApplyConfiguration) WithServiceNetwork(values ...string) *NetworkSpecApplyConfiguration {
+	for i := range values {
+		b.ServiceNetwork = append(b.ServiceNetwork, values[i])
+	}
+	return b
+}
+
+// WithNetworkType sets the NetworkType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkType field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithNetworkType(value string) *NetworkSpecApplyConfiguration {
+	b.NetworkType = &value
+	return b
+}
+
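The variadic slice setters append across calls rather than overwrite, which matters when building dual-stack configurations. A minimal sketch using only the builders shown above; the CIDR values are illustrative:

package networkexample

import (
	configv1apply "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// dualStackSpec demonstrates the append semantics of WithServiceNetwork:
// both CIDRs end up in spec.serviceNetwork, in call order.
func dualStackSpec() *configv1apply.NetworkSpecApplyConfiguration {
	return configv1apply.NetworkSpec().
		WithNetworkType("OVNKubernetes").
		WithServiceNetwork("172.30.0.0/16").
		WithServiceNetwork("fd02::/112") // appended, not overwritten
}
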
+// WithExternalIP sets the ExternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExternalIP field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithExternalIP(value *ExternalIPConfigApplyConfiguration) *NetworkSpecApplyConfiguration {
+	b.ExternalIP = value
+	return b
+}
+
+// WithServiceNodePortRange sets the ServiceNodePortRange field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceNodePortRange field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithServiceNodePortRange(value string) *NetworkSpecApplyConfiguration {
+	b.ServiceNodePortRange = &value
+	return b
+}
+
+// WithNetworkDiagnostics sets the NetworkDiagnostics field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkDiagnostics field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithNetworkDiagnostics(value *NetworkDiagnosticsApplyConfiguration) *NetworkSpecApplyConfiguration {
+	b.NetworkDiagnostics = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go
new file mode 100644
index 0000000000000..de3697ed71a84
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go
@@ -0,0 +1,84 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NetworkStatusApplyConfiguration represents a declarative configuration of the NetworkStatus type for use
+// with apply.
+type NetworkStatusApplyConfiguration struct {
+	ClusterNetwork    []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"`
+	ServiceNetwork    []string                                `json:"serviceNetwork,omitempty"`
+	NetworkType       *string                                 `json:"networkType,omitempty"`
+	ClusterNetworkMTU *int                                    `json:"clusterNetworkMTU,omitempty"`
+	Migration         *NetworkMigrationApplyConfiguration     `json:"migration,omitempty"`
+	Conditions        []metav1.ConditionApplyConfiguration    `json:"conditions,omitempty"`
+}
+
+// NetworkStatusApplyConfiguration constructs a declarative configuration of the NetworkStatus type for use with
+// apply.
+func NetworkStatus() *NetworkStatusApplyConfiguration {
+	return &NetworkStatusApplyConfiguration{}
+}
+
+// WithClusterNetwork adds the given value to the ClusterNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClusterNetwork field.
+func (b *NetworkStatusApplyConfiguration) WithClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *NetworkStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithClusterNetwork")
+		}
+		b.ClusterNetwork = append(b.ClusterNetwork, *values[i])
+	}
+	return b
+}
+
+// WithServiceNetwork adds the given value to the ServiceNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceNetwork field.
+func (b *NetworkStatusApplyConfiguration) WithServiceNetwork(values ...string) *NetworkStatusApplyConfiguration {
+	for i := range values {
+		b.ServiceNetwork = append(b.ServiceNetwork, values[i])
+	}
+	return b
+}
+
+// WithNetworkType sets the NetworkType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkType field is set to the value of the last call.
+func (b *NetworkStatusApplyConfiguration) WithNetworkType(value string) *NetworkStatusApplyConfiguration {
+	b.NetworkType = &value
+	return b
+}
+
+// WithClusterNetworkMTU sets the ClusterNetworkMTU field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterNetworkMTU field is set to the value of the last call.
+func (b *NetworkStatusApplyConfiguration) WithClusterNetworkMTU(value int) *NetworkStatusApplyConfiguration {
+	b.ClusterNetworkMTU = &value
+	return b
+}
+
+// WithMigration sets the Migration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Migration field is set to the value of the last call.
+func (b *NetworkStatusApplyConfiguration) WithMigration(value *NetworkMigrationApplyConfiguration) *NetworkStatusApplyConfiguration {
+	b.Migration = value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *NetworkStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *NetworkStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
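Status conditions are built with the ConditionApplyConfiguration from k8s.io/client-go/applyconfigurations/meta/v1. Note that WithConditions panics on nil entries, so the condition builder is constructed inline. A sketch; the condition type, reason, and message strings are illustrative:

package networkexample

import (
	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"

	configv1apply "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// migrationStatus builds a NetworkStatus carrying one condition.
func migrationStatus() *configv1apply.NetworkStatusApplyConfiguration {
	return configv1apply.NetworkStatus().
		WithNetworkType("OVNKubernetes").
		WithClusterNetworkMTU(1400).
		WithConditions(metav1.Condition().
			WithType("NetworkTypeMigrationInProgress"). // illustrative condition type
			WithStatus(apismetav1.ConditionFalse).
			WithReason("NoMigration").
			WithMessage("no network type migration in progress"))
}
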
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go
new file mode 100644
index 0000000000000..61170562359e3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NodeApplyConfiguration represents a declarative configuration of the Node type for use
+// with apply.
+type NodeApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *NodeSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *NodeStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Node constructs a declarative configuration of the Node type for use with
+// apply.
+func Node(name string) *NodeApplyConfiguration {
+	b := &NodeApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Node")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractNode extracts the applied configuration owned by fieldManager from
+// node. If no managedFields are found in node for fieldManager, a
+// NodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager, either because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// node must be an unmodified Node API object that was retrieved from the Kubernetes API.
+// ExtractNode provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractNode(node *configv1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+	return extractNode(node, fieldManager, "")
+}
+
+// ExtractNodeStatus is the same as ExtractNode except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNodeStatus(node *configv1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+	return extractNode(node, fieldManager, "status")
+}
+
+func extractNode(node *configv1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
+	b := &NodeApplyConfiguration{}
+	err := managedfields.ExtractInto(node, internal.Parser().Type("com.github.openshift.api.config.v1.Node"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(node.Name)
+
+	b.WithKind("Node")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithGeneration(value int64) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *NodeApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithSpec(value *NodeSpecApplyConfiguration) *NodeApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) *NodeApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go new file mode 100644 index 0000000000000..a0732e78a398d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use +// with apply. +type NodeSpecApplyConfiguration struct { + CgroupMode *configv1.CgroupMode `json:"cgroupMode,omitempty"` + WorkerLatencyProfile *configv1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + MinimumKubeletVersion *string `json:"minimumKubeletVersion,omitempty"` +} + +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with +// apply. +func NodeSpec() *NodeSpecApplyConfiguration { + return &NodeSpecApplyConfiguration{} +} + +// WithCgroupMode sets the CgroupMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CgroupMode field is set to the value of the last call. +func (b *NodeSpecApplyConfiguration) WithCgroupMode(value configv1.CgroupMode) *NodeSpecApplyConfiguration { + b.CgroupMode = &value + return b +} + +// WithWorkerLatencyProfile sets the WorkerLatencyProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WorkerLatencyProfile field is set to the value of the last call. 
+func (b *NodeSpecApplyConfiguration) WithWorkerLatencyProfile(value configv1.WorkerLatencyProfileType) *NodeSpecApplyConfiguration {
+	b.WorkerLatencyProfile = &value
+	return b
+}
+
+// WithMinimumKubeletVersion sets the MinimumKubeletVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinimumKubeletVersion field is set to the value of the last call.
+func (b *NodeSpecApplyConfiguration) WithMinimumKubeletVersion(value string) *NodeSpecApplyConfiguration {
+	b.MinimumKubeletVersion = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go
new file mode 100644
index 0000000000000..ee6ebd99ee60f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use
+// with apply.
+type NodeStatusApplyConfiguration struct {
+	Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with
+// apply.
+func NodeStatus() *NodeStatusApplyConfiguration {
+	return &NodeStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *NodeStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *NodeStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
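The NodeSpec builder above exposes three knobs. A minimal sketch; the configv1.CgroupModeV2 and configv1.DefaultUpdateDefaultReaction constants are assumed from the config/v1 API, and the version string is illustrative:

package networkexample

import (
	configv1 "github.com/openshift/api/config/v1"
	configv1apply "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// nodeSpec chains the scalar setters; each later call to the same setter
// would simply overwrite the earlier value.
func nodeSpec() *configv1apply.NodeSpecApplyConfiguration {
	return configv1apply.NodeSpec().
		WithCgroupMode(configv1.CgroupModeV2).                       // assumed constant
		WithWorkerLatencyProfile(configv1.DefaultUpdateDefaultReaction). // assumed constant
		WithMinimumKubeletVersion("1.30.0") // illustrative version
}
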
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go
new file mode 100644
index 0000000000000..31d77a83e2e1a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixFailureDomainApplyConfiguration represents a declarative configuration of the NutanixFailureDomain type for use
+// with apply.
+type NutanixFailureDomainApplyConfiguration struct {
+	Name    *string                                       `json:"name,omitempty"`
+	Cluster *NutanixResourceIdentifierApplyConfiguration  `json:"cluster,omitempty"`
+	Subnets []NutanixResourceIdentifierApplyConfiguration `json:"subnets,omitempty"`
+}
+
+// NutanixFailureDomainApplyConfiguration constructs a declarative configuration of the NutanixFailureDomain type for use with
+// apply.
+func NutanixFailureDomain() *NutanixFailureDomainApplyConfiguration {
+	return &NutanixFailureDomainApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NutanixFailureDomainApplyConfiguration) WithName(value string) *NutanixFailureDomainApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithCluster sets the Cluster field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Cluster field is set to the value of the last call.
+func (b *NutanixFailureDomainApplyConfiguration) WithCluster(value *NutanixResourceIdentifierApplyConfiguration) *NutanixFailureDomainApplyConfiguration {
+	b.Cluster = value
+	return b
+}
+
+// WithSubnets adds the given value to the Subnets field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Subnets field.
+func (b *NutanixFailureDomainApplyConfiguration) WithSubnets(values ...*NutanixResourceIdentifierApplyConfiguration) *NutanixFailureDomainApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithSubnets")
+		}
+		b.Subnets = append(b.Subnets, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go
new file mode 100644
index 0000000000000..84d3b7ade3563
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// NutanixPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the NutanixPlatformLoadBalancer type for use
+// with apply.
+type NutanixPlatformLoadBalancerApplyConfiguration struct {
+	Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// NutanixPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the NutanixPlatformLoadBalancer type for use with
+// apply.
+func NutanixPlatformLoadBalancer() *NutanixPlatformLoadBalancerApplyConfiguration {
+	return &NutanixPlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *NutanixPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *NutanixPlatformLoadBalancerApplyConfiguration {
+	b.Type = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go
new file mode 100644
index 0000000000000..8f7cb98423627
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixPlatformSpecApplyConfiguration represents a declarative configuration of the NutanixPlatformSpec type for use
+// with apply.
+type NutanixPlatformSpecApplyConfiguration struct {
+	PrismCentral   *NutanixPrismEndpointApplyConfiguration         `json:"prismCentral,omitempty"`
+	PrismElements  []NutanixPrismElementEndpointApplyConfiguration `json:"prismElements,omitempty"`
+	FailureDomains []NutanixFailureDomainApplyConfiguration        `json:"failureDomains,omitempty"`
+}
+
+// NutanixPlatformSpecApplyConfiguration constructs a declarative configuration of the NutanixPlatformSpec type for use with
+// apply.
+func NutanixPlatformSpec() *NutanixPlatformSpecApplyConfiguration {
+	return &NutanixPlatformSpecApplyConfiguration{}
+}
+
+// WithPrismCentral sets the PrismCentral field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrismCentral field is set to the value of the last call.
+func (b *NutanixPlatformSpecApplyConfiguration) WithPrismCentral(value *NutanixPrismEndpointApplyConfiguration) *NutanixPlatformSpecApplyConfiguration {
+	b.PrismCentral = value
+	return b
+}
+
+// WithPrismElements adds the given value to the PrismElements field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PrismElements field.
+func (b *NutanixPlatformSpecApplyConfiguration) WithPrismElements(values ...*NutanixPrismElementEndpointApplyConfiguration) *NutanixPlatformSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithPrismElements")
+		}
+		b.PrismElements = append(b.PrismElements, *values[i])
+	}
+	return b
+}
+
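The Nutanix builders nest naturally: a platform spec takes a Prism Central endpoint and a list of failure domains, each of which identifies its cluster and subnets by resource identifier. A sketch using only builders that appear in this diff; the address, UUIDs, and the configv1.NutanixIdentifierUUID constant are illustrative assumptions:

package networkexample

import (
	configv1 "github.com/openshift/api/config/v1"
	configv1apply "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// nutanixSpec wires a Prism Central endpoint to one failure domain.
func nutanixSpec() *configv1apply.NutanixPlatformSpecApplyConfiguration {
	return configv1apply.NutanixPlatformSpec().
		WithPrismCentral(configv1apply.NutanixPrismEndpoint().
			WithAddress("prism-central.example.com"). // illustrative address
			WithPort(9440)).
		WithFailureDomains(configv1apply.NutanixFailureDomain().
			WithName("fd-1").
			WithCluster(configv1apply.NutanixResourceIdentifier().
				WithType(configv1.NutanixIdentifierUUID). // assumed constant
				WithUUID("0005b0f1-8f43-a0f2-02b7-3cecef193712")).
			WithSubnets(configv1apply.NutanixResourceIdentifier().
				WithType(configv1.NutanixIdentifierUUID).
				WithUUID("c7938dc6-7659-453e-a688-e26020c68e43")))
}
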
+// WithFailureDomains adds the given value to the FailureDomains field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the FailureDomains field.
+func (b *NutanixPlatformSpecApplyConfiguration) WithFailureDomains(values ...*NutanixFailureDomainApplyConfiguration) *NutanixPlatformSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithFailureDomains")
+		}
+		b.FailureDomains = append(b.FailureDomains, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go
new file mode 100644
index 0000000000000..d7988e5115eb1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go
@@ -0,0 +1,63 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixPlatformStatusApplyConfiguration represents a declarative configuration of the NutanixPlatformStatus type for use
+// with apply.
+type NutanixPlatformStatusApplyConfiguration struct {
+	APIServerInternalIP  *string                                        `json:"apiServerInternalIP,omitempty"`
+	APIServerInternalIPs []string                                       `json:"apiServerInternalIPs,omitempty"`
+	IngressIP            *string                                        `json:"ingressIP,omitempty"`
+	IngressIPs           []string                                       `json:"ingressIPs,omitempty"`
+	LoadBalancer         *NutanixPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+}
+
+// NutanixPlatformStatusApplyConfiguration constructs a declarative configuration of the NutanixPlatformStatus type for use with
+// apply.
+func NutanixPlatformStatus() *NutanixPlatformStatusApplyConfiguration {
+	return &NutanixPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *NutanixPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *NutanixPlatformStatusApplyConfiguration {
+	b.APIServerInternalIP = &value
+	return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *NutanixPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *NutanixPlatformStatusApplyConfiguration {
+	for i := range values {
+		b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+	}
+	return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *NutanixPlatformStatusApplyConfiguration) WithIngressIP(value string) *NutanixPlatformStatusApplyConfiguration {
+	b.IngressIP = &value
+	return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *NutanixPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *NutanixPlatformStatusApplyConfiguration { + for i := range values { + b.IngressIPs = append(b.IngressIPs, values[i]) + } + return b +} + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *NutanixPlatformStatusApplyConfiguration) WithLoadBalancer(value *NutanixPlatformLoadBalancerApplyConfiguration) *NutanixPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go new file mode 100644 index 0000000000000..2e59ff235a601 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NutanixPrismElementEndpointApplyConfiguration represents a declarative configuration of the NutanixPrismElementEndpoint type for use +// with apply. +type NutanixPrismElementEndpointApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Endpoint *NutanixPrismEndpointApplyConfiguration `json:"endpoint,omitempty"` +} + +// NutanixPrismElementEndpointApplyConfiguration constructs a declarative configuration of the NutanixPrismElementEndpoint type for use with +// apply. +func NutanixPrismElementEndpoint() *NutanixPrismElementEndpointApplyConfiguration { + return &NutanixPrismElementEndpointApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NutanixPrismElementEndpointApplyConfiguration) WithName(value string) *NutanixPrismElementEndpointApplyConfiguration { + b.Name = &value + return b +} + +// WithEndpoint sets the Endpoint field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Endpoint field is set to the value of the last call. +func (b *NutanixPrismElementEndpointApplyConfiguration) WithEndpoint(value *NutanixPrismEndpointApplyConfiguration) *NutanixPrismElementEndpointApplyConfiguration { + b.Endpoint = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go new file mode 100644 index 0000000000000..8012c2cb23a73 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NutanixPrismEndpointApplyConfiguration represents a declarative configuration of the NutanixPrismEndpoint type for use +// with apply. 
+type NutanixPrismEndpointApplyConfiguration struct { + Address *string `json:"address,omitempty"` + Port *int32 `json:"port,omitempty"` +} + +// NutanixPrismEndpointApplyConfiguration constructs a declarative configuration of the NutanixPrismEndpoint type for use with +// apply. +func NutanixPrismEndpoint() *NutanixPrismEndpointApplyConfiguration { + return &NutanixPrismEndpointApplyConfiguration{} +} + +// WithAddress sets the Address field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Address field is set to the value of the last call. +func (b *NutanixPrismEndpointApplyConfiguration) WithAddress(value string) *NutanixPrismEndpointApplyConfiguration { + b.Address = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *NutanixPrismEndpointApplyConfiguration) WithPort(value int32) *NutanixPrismEndpointApplyConfiguration { + b.Port = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go new file mode 100644 index 0000000000000..5e9b095d83b62 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// NutanixResourceIdentifierApplyConfiguration represents a declarative configuration of the NutanixResourceIdentifier type for use +// with apply. +type NutanixResourceIdentifierApplyConfiguration struct { + Type *configv1.NutanixIdentifierType `json:"type,omitempty"` + UUID *string `json:"uuid,omitempty"` + Name *string `json:"name,omitempty"` +} + +// NutanixResourceIdentifierApplyConfiguration constructs a declarative configuration of the NutanixResourceIdentifier type for use with +// apply. +func NutanixResourceIdentifier() *NutanixResourceIdentifierApplyConfiguration { + return &NutanixResourceIdentifierApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *NutanixResourceIdentifierApplyConfiguration) WithType(value configv1.NutanixIdentifierType) *NutanixResourceIdentifierApplyConfiguration { + b.Type = &value + return b +} + +// WithUUID sets the UUID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UUID field is set to the value of the last call. +func (b *NutanixResourceIdentifierApplyConfiguration) WithUUID(value string) *NutanixResourceIdentifierApplyConfiguration { + b.UUID = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NutanixResourceIdentifierApplyConfiguration) WithName(value string) *NutanixResourceIdentifierApplyConfiguration {
+	b.Name = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go
new file mode 100644
index 0000000000000..1c9589c0803c8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OAuthApplyConfiguration represents a declarative configuration of the OAuth type for use
+// with apply.
+type OAuthApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *OAuthSpecApplyConfiguration `json:"spec,omitempty"`
+	Status                               *configv1.OAuthStatus        `json:"status,omitempty"`
+}
+
+// OAuth constructs a declarative configuration of the OAuth type for use with
+// apply.
+func OAuth(name string) *OAuthApplyConfiguration {
+	b := &OAuthApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("OAuth")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractOAuth extracts the applied configuration owned by fieldManager from
+// oAuth. If no managedFields are found in oAuth for fieldManager, an
+// OAuthApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// oAuth must be an unmodified OAuth API object that was retrieved from the Kubernetes API.
+// ExtractOAuth provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOAuth(oAuth *configv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) {
+	return extractOAuth(oAuth, fieldManager, "")
+}
+
+// ExtractOAuthStatus is the same as ExtractOAuth except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOAuthStatus(oAuth *configv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) { + return extractOAuth(oAuth, fieldManager, "status") +} + +func extractOAuth(oAuth *configv1.OAuth, fieldManager string, subresource string) (*OAuthApplyConfiguration, error) { + b := &OAuthApplyConfiguration{} + err := managedfields.ExtractInto(oAuth, internal.Parser().Type("com.github.openshift.api.config.v1.OAuth"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oAuth.Name) + + b.WithKind("OAuth") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithKind(value string) *OAuthApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithAPIVersion(value string) *OAuthApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithName(value string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithGenerateName(value string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithNamespace(value string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
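The extract functions above exist to support the extract/modify-in-place/apply workflow described in the generated comments. The sketch below is an editorial illustration, not part of the vendored code: it assumes the typed OAuth client from openshift/client-go exposes Get and Apply in the usual generated-clientset shape, and it assumes the TokenConfig builder (with WithAccessTokenMaxAgeSeconds) from elsewhere in this package; the field manager name and token lifetime are placeholders.

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
)

// reapplyTokenConfig performs the extract/modify-in-place/apply round trip for
// the cluster-scoped OAuth singleton.
func reapplyTokenConfig(ctx context.Context, client configv1client.OAuthInterface) error {
	const fieldManager = "example-operator" // placeholder manager name

	// Extract requires an unmodified object retrieved from the API.
	live, err := client.Get(ctx, "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Recover only the fields this manager already owns.
	cfg, err := applyconfigv1.ExtractOAuth(live, fieldManager)
	if err != nil {
		return err
	}

	// Modify in place by chaining "With" calls, then apply the result back.
	// TokenConfig and WithAccessTokenMaxAgeSeconds are assumed from the rest
	// of the generated package; 86400 is an arbitrary one-day token lifetime.
	cfg.WithSpec(applyconfigv1.OAuthSpec().
		WithTokenConfig(applyconfigv1.TokenConfig().
			WithAccessTokenMaxAgeSeconds(86400)))

	applied, err := client.Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	if err != nil {
		return err
	}
	fmt.Println("applied", applied.Name)
	return nil
}
```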
+func (b *OAuthApplyConfiguration) WithUID(value types.UID) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithResourceVersion(value string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithGeneration(value int64) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *OAuthApplyConfiguration) WithLabels(entries map[string]string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OAuthApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OAuthApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *OAuthApplyConfiguration) WithFinalizers(values ...string) *OAuthApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OAuthApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OAuthApplyConfiguration) WithSpec(value *OAuthSpecApplyConfiguration) *OAuthApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
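WithLabels and WithAnnotations merge rather than replace: each call lazily allocates the map and copies the entries in, so repeated calls accumulate keys and the last write to a given key wins. A small self-contained illustration using only the builders above (label keys and values are arbitrary):

```go
package main

import (
	"fmt"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	b := applyconfigv1.OAuth("cluster").
		WithLabels(map[string]string{"team": "auth", "tier": "control-plane"}).
		WithLabels(map[string]string{"team": "platform"}) // "team" is overwritten, "tier" is kept

	// Maps print in sorted key order: map[team:platform tier:control-plane]
	fmt.Println(b.Labels)
}
```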
+func (b *OAuthApplyConfiguration) WithStatus(value configv1.OAuthStatus) *OAuthApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OAuthApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go new file mode 100644 index 0000000000000..3b348819d9010 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OAuthRemoteConnectionInfoApplyConfiguration represents a declarative configuration of the OAuthRemoteConnectionInfo type for use +// with apply. +type OAuthRemoteConnectionInfoApplyConfiguration struct { + URL *string `json:"url,omitempty"` + CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` + TLSClientCert *SecretNameReferenceApplyConfiguration `json:"tlsClientCert,omitempty"` + TLSClientKey *SecretNameReferenceApplyConfiguration `json:"tlsClientKey,omitempty"` +} + +// OAuthRemoteConnectionInfoApplyConfiguration constructs a declarative configuration of the OAuthRemoteConnectionInfo type for use with +// apply. +func OAuthRemoteConnectionInfo() *OAuthRemoteConnectionInfoApplyConfiguration { + return &OAuthRemoteConnectionInfoApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. +func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithURL(value string) *OAuthRemoteConnectionInfoApplyConfiguration { + b.URL = &value + return b +} + +// WithCA sets the CA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CA field is set to the value of the last call. +func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *OAuthRemoteConnectionInfoApplyConfiguration { + b.CA = value + return b +} + +// WithTLSClientCert sets the TLSClientCert field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSClientCert field is set to the value of the last call. +func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *OAuthRemoteConnectionInfoApplyConfiguration { + b.TLSClientCert = value + return b +} + +// WithTLSClientKey sets the TLSClientKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSClientKey field is set to the value of the last call. 
+func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *OAuthRemoteConnectionInfoApplyConfiguration { + b.TLSClientKey = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go new file mode 100644 index 0000000000000..5eacc05cb4b33 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OAuthSpecApplyConfiguration represents a declarative configuration of the OAuthSpec type for use +// with apply. +type OAuthSpecApplyConfiguration struct { + IdentityProviders []IdentityProviderApplyConfiguration `json:"identityProviders,omitempty"` + TokenConfig *TokenConfigApplyConfiguration `json:"tokenConfig,omitempty"` + Templates *OAuthTemplatesApplyConfiguration `json:"templates,omitempty"` +} + +// OAuthSpecApplyConfiguration constructs a declarative configuration of the OAuthSpec type for use with +// apply. +func OAuthSpec() *OAuthSpecApplyConfiguration { + return &OAuthSpecApplyConfiguration{} +} + +// WithIdentityProviders adds the given value to the IdentityProviders field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IdentityProviders field. +func (b *OAuthSpecApplyConfiguration) WithIdentityProviders(values ...*IdentityProviderApplyConfiguration) *OAuthSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIdentityProviders") + } + b.IdentityProviders = append(b.IdentityProviders, *values[i]) + } + return b +} + +// WithTokenConfig sets the TokenConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TokenConfig field is set to the value of the last call. +func (b *OAuthSpecApplyConfiguration) WithTokenConfig(value *TokenConfigApplyConfiguration) *OAuthSpecApplyConfiguration { + b.TokenConfig = value + return b +} + +// WithTemplates sets the Templates field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Templates field is set to the value of the last call. +func (b *OAuthSpecApplyConfiguration) WithTemplates(value *OAuthTemplatesApplyConfiguration) *OAuthSpecApplyConfiguration { + b.Templates = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go new file mode 100644 index 0000000000000..98bc5a0db9451 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OAuthTemplatesApplyConfiguration represents a declarative configuration of the OAuthTemplates type for use +// with apply. 
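The pointer-slice setters such as WithIdentityProviders panic on nil values, so a malformed builder chain fails at construction time rather than at apply time. The sketch below shows how a spec might be composed under stated assumptions: the IdentityProvider, HTPasswdIdentityProvider, and SecretNameReference builders are assumed from elsewhere in this generated package, and all names are illustrative.

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// exampleOAuthSpec assembles a spec with one HTPasswd identity provider and a
// custom login template; secret names are placeholders.
func exampleOAuthSpec() *applyconfigv1.OAuthSpecApplyConfiguration {
	return applyconfigv1.OAuthSpec().
		WithIdentityProviders(applyconfigv1.IdentityProvider().
			WithName("htpasswd").
			WithMappingMethod(configv1.MappingMethodClaim).
			WithType(configv1.IdentityProviderTypeHTPasswd).
			WithHTPasswd(applyconfigv1.HTPasswdIdentityProvider().
				WithFileData(applyconfigv1.SecretNameReference().WithName("htpasswd-secret")))).
		WithTemplates(applyconfigv1.OAuthTemplates().
			WithLogin(applyconfigv1.SecretNameReference().WithName("login-template")))
}
```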
+type OAuthTemplatesApplyConfiguration struct { + Login *SecretNameReferenceApplyConfiguration `json:"login,omitempty"` + ProviderSelection *SecretNameReferenceApplyConfiguration `json:"providerSelection,omitempty"` + Error *SecretNameReferenceApplyConfiguration `json:"error,omitempty"` +} + +// OAuthTemplatesApplyConfiguration constructs a declarative configuration of the OAuthTemplates type for use with +// apply. +func OAuthTemplates() *OAuthTemplatesApplyConfiguration { + return &OAuthTemplatesApplyConfiguration{} +} + +// WithLogin sets the Login field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Login field is set to the value of the last call. +func (b *OAuthTemplatesApplyConfiguration) WithLogin(value *SecretNameReferenceApplyConfiguration) *OAuthTemplatesApplyConfiguration { + b.Login = value + return b +} + +// WithProviderSelection sets the ProviderSelection field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProviderSelection field is set to the value of the last call. +func (b *OAuthTemplatesApplyConfiguration) WithProviderSelection(value *SecretNameReferenceApplyConfiguration) *OAuthTemplatesApplyConfiguration { + b.ProviderSelection = value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. +func (b *OAuthTemplatesApplyConfiguration) WithError(value *SecretNameReferenceApplyConfiguration) *OAuthTemplatesApplyConfiguration { + b.Error = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go new file mode 100644 index 0000000000000..dfbc465e7176d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use +// with apply. +type ObjectReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with +// apply. +func ObjectReference() *ObjectReferenceApplyConfiguration { + return &ObjectReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. 
+func (b *ObjectReferenceApplyConfiguration) WithGroup(value string) *ObjectReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithResource(value string) *ObjectReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithNamespace(value string) *ObjectReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithName(value string) *ObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go new file mode 100644 index 0000000000000..65fa3dd46249e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go @@ -0,0 +1,61 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OIDCClientConfigApplyConfiguration represents a declarative configuration of the OIDCClientConfig type for use +// with apply. +type OIDCClientConfigApplyConfiguration struct { + ComponentName *string `json:"componentName,omitempty"` + ComponentNamespace *string `json:"componentNamespace,omitempty"` + ClientID *string `json:"clientID,omitempty"` + ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"` + ExtraScopes []string `json:"extraScopes,omitempty"` +} + +// OIDCClientConfigApplyConfiguration constructs a declarative configuration of the OIDCClientConfig type for use with +// apply. +func OIDCClientConfig() *OIDCClientConfigApplyConfiguration { + return &OIDCClientConfigApplyConfiguration{} +} + +// WithComponentName sets the ComponentName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ComponentName field is set to the value of the last call. +func (b *OIDCClientConfigApplyConfiguration) WithComponentName(value string) *OIDCClientConfigApplyConfiguration { + b.ComponentName = &value + return b +} + +// WithComponentNamespace sets the ComponentNamespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ComponentNamespace field is set to the value of the last call. 
+func (b *OIDCClientConfigApplyConfiguration) WithComponentNamespace(value string) *OIDCClientConfigApplyConfiguration { + b.ComponentNamespace = &value + return b +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *OIDCClientConfigApplyConfiguration) WithClientID(value string) *OIDCClientConfigApplyConfiguration { + b.ClientID = &value + return b +} + +// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientSecret field is set to the value of the last call. +func (b *OIDCClientConfigApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *OIDCClientConfigApplyConfiguration { + b.ClientSecret = value + return b +} + +// WithExtraScopes adds the given value to the ExtraScopes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExtraScopes field. +func (b *OIDCClientConfigApplyConfiguration) WithExtraScopes(values ...string) *OIDCClientConfigApplyConfiguration { + for i := range values { + b.ExtraScopes = append(b.ExtraScopes, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go new file mode 100644 index 0000000000000..5109305b23dbd --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OIDCClientReferenceApplyConfiguration represents a declarative configuration of the OIDCClientReference type for use +// with apply. +type OIDCClientReferenceApplyConfiguration struct { + OIDCProviderName *string `json:"oidcProviderName,omitempty"` + IssuerURL *string `json:"issuerURL,omitempty"` + ClientID *string `json:"clientID,omitempty"` +} + +// OIDCClientReferenceApplyConfiguration constructs a declarative configuration of the OIDCClientReference type for use with +// apply. +func OIDCClientReference() *OIDCClientReferenceApplyConfiguration { + return &OIDCClientReferenceApplyConfiguration{} +} + +// WithOIDCProviderName sets the OIDCProviderName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OIDCProviderName field is set to the value of the last call. +func (b *OIDCClientReferenceApplyConfiguration) WithOIDCProviderName(value string) *OIDCClientReferenceApplyConfiguration { + b.OIDCProviderName = &value + return b +} + +// WithIssuerURL sets the IssuerURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IssuerURL field is set to the value of the last call. 
+func (b *OIDCClientReferenceApplyConfiguration) WithIssuerURL(value string) *OIDCClientReferenceApplyConfiguration { + b.IssuerURL = &value + return b +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *OIDCClientReferenceApplyConfiguration) WithClientID(value string) *OIDCClientReferenceApplyConfiguration { + b.ClientID = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go new file mode 100644 index 0000000000000..5d365a87ecb35 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go @@ -0,0 +1,76 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OIDCClientStatusApplyConfiguration represents a declarative configuration of the OIDCClientStatus type for use +// with apply. +type OIDCClientStatusApplyConfiguration struct { + ComponentName *string `json:"componentName,omitempty"` + ComponentNamespace *string `json:"componentNamespace,omitempty"` + CurrentOIDCClients []OIDCClientReferenceApplyConfiguration `json:"currentOIDCClients,omitempty"` + ConsumingUsers []configv1.ConsumingUser `json:"consumingUsers,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// OIDCClientStatusApplyConfiguration constructs a declarative configuration of the OIDCClientStatus type for use with +// apply. +func OIDCClientStatus() *OIDCClientStatusApplyConfiguration { + return &OIDCClientStatusApplyConfiguration{} +} + +// WithComponentName sets the ComponentName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ComponentName field is set to the value of the last call. +func (b *OIDCClientStatusApplyConfiguration) WithComponentName(value string) *OIDCClientStatusApplyConfiguration { + b.ComponentName = &value + return b +} + +// WithComponentNamespace sets the ComponentNamespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ComponentNamespace field is set to the value of the last call. +func (b *OIDCClientStatusApplyConfiguration) WithComponentNamespace(value string) *OIDCClientStatusApplyConfiguration { + b.ComponentNamespace = &value + return b +} + +// WithCurrentOIDCClients adds the given value to the CurrentOIDCClients field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CurrentOIDCClients field. 
+func (b *OIDCClientStatusApplyConfiguration) WithCurrentOIDCClients(values ...*OIDCClientReferenceApplyConfiguration) *OIDCClientStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithCurrentOIDCClients") + } + b.CurrentOIDCClients = append(b.CurrentOIDCClients, *values[i]) + } + return b +} + +// WithConsumingUsers adds the given value to the ConsumingUsers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ConsumingUsers field. +func (b *OIDCClientStatusApplyConfiguration) WithConsumingUsers(values ...configv1.ConsumingUser) *OIDCClientStatusApplyConfiguration { + for i := range values { + b.ConsumingUsers = append(b.ConsumingUsers, values[i]) + } + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OIDCClientStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *OIDCClientStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go new file mode 100644 index 0000000000000..7d93003673645 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go @@ -0,0 +1,69 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OIDCProviderApplyConfiguration represents a declarative configuration of the OIDCProvider type for use +// with apply. +type OIDCProviderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Issuer *TokenIssuerApplyConfiguration `json:"issuer,omitempty"` + OIDCClients []OIDCClientConfigApplyConfiguration `json:"oidcClients,omitempty"` + ClaimMappings *TokenClaimMappingsApplyConfiguration `json:"claimMappings,omitempty"` + ClaimValidationRules []TokenClaimValidationRuleApplyConfiguration `json:"claimValidationRules,omitempty"` +} + +// OIDCProviderApplyConfiguration constructs a declarative configuration of the OIDCProvider type for use with +// apply. +func OIDCProvider() *OIDCProviderApplyConfiguration { + return &OIDCProviderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OIDCProviderApplyConfiguration) WithName(value string) *OIDCProviderApplyConfiguration { + b.Name = &value + return b +} + +// WithIssuer sets the Issuer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Issuer field is set to the value of the last call. 
+func (b *OIDCProviderApplyConfiguration) WithIssuer(value *TokenIssuerApplyConfiguration) *OIDCProviderApplyConfiguration { + b.Issuer = value + return b +} + +// WithOIDCClients adds the given value to the OIDCClients field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OIDCClients field. +func (b *OIDCProviderApplyConfiguration) WithOIDCClients(values ...*OIDCClientConfigApplyConfiguration) *OIDCProviderApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOIDCClients") + } + b.OIDCClients = append(b.OIDCClients, *values[i]) + } + return b +} + +// WithClaimMappings sets the ClaimMappings field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClaimMappings field is set to the value of the last call. +func (b *OIDCProviderApplyConfiguration) WithClaimMappings(value *TokenClaimMappingsApplyConfiguration) *OIDCProviderApplyConfiguration { + b.ClaimMappings = value + return b +} + +// WithClaimValidationRules adds the given value to the ClaimValidationRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ClaimValidationRules field. +func (b *OIDCProviderApplyConfiguration) WithClaimValidationRules(values ...*TokenClaimValidationRuleApplyConfiguration) *OIDCProviderApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithClaimValidationRules") + } + b.ClaimValidationRules = append(b.ClaimValidationRules, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go new file mode 100644 index 0000000000000..8f11192c5b5af --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go @@ -0,0 +1,62 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// OpenIDClaimsApplyConfiguration represents a declarative configuration of the OpenIDClaims type for use +// with apply. +type OpenIDClaimsApplyConfiguration struct { + PreferredUsername []string `json:"preferredUsername,omitempty"` + Name []string `json:"name,omitempty"` + Email []string `json:"email,omitempty"` + Groups []configv1.OpenIDClaim `json:"groups,omitempty"` +} + +// OpenIDClaimsApplyConfiguration constructs a declarative configuration of the OpenIDClaims type for use with +// apply. +func OpenIDClaims() *OpenIDClaimsApplyConfiguration { + return &OpenIDClaimsApplyConfiguration{} +} + +// WithPreferredUsername adds the given value to the PreferredUsername field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PreferredUsername field. 
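Taken together, the OIDC builders compose the same way as the rest of the package. A minimal sketch that registers one component as an OIDC client of a provider, using only constructors shown above (all names are placeholders):

```go
package example

import (
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// exampleOIDCProvider wires a single client registration into a provider; the
// provider, component, and client IDs are illustrative values.
func exampleOIDCProvider() *applyconfigv1.OIDCProviderApplyConfiguration {
	return applyconfigv1.OIDCProvider().
		WithName("keycloak").
		WithOIDCClients(
			applyconfigv1.OIDCClientConfig().
				WithComponentName("console").
				WithComponentNamespace("openshift-console").
				WithClientID("openshift-console").
				WithExtraScopes("email", "profile"),
		)
}
```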
+func (b *OpenIDClaimsApplyConfiguration) WithPreferredUsername(values ...string) *OpenIDClaimsApplyConfiguration { + for i := range values { + b.PreferredUsername = append(b.PreferredUsername, values[i]) + } + return b +} + +// WithName adds the given value to the Name field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Name field. +func (b *OpenIDClaimsApplyConfiguration) WithName(values ...string) *OpenIDClaimsApplyConfiguration { + for i := range values { + b.Name = append(b.Name, values[i]) + } + return b +} + +// WithEmail adds the given value to the Email field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Email field. +func (b *OpenIDClaimsApplyConfiguration) WithEmail(values ...string) *OpenIDClaimsApplyConfiguration { + for i := range values { + b.Email = append(b.Email, values[i]) + } + return b +} + +// WithGroups adds the given value to the Groups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Groups field. +func (b *OpenIDClaimsApplyConfiguration) WithGroups(values ...configv1.OpenIDClaim) *OpenIDClaimsApplyConfiguration { + for i := range values { + b.Groups = append(b.Groups, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go new file mode 100644 index 0000000000000..9372178cf2815 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go @@ -0,0 +1,85 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OpenIDIdentityProviderApplyConfiguration represents a declarative configuration of the OpenIDIdentityProvider type for use +// with apply. +type OpenIDIdentityProviderApplyConfiguration struct { + ClientID *string `json:"clientID,omitempty"` + ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"` + CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` + ExtraScopes []string `json:"extraScopes,omitempty"` + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` + Issuer *string `json:"issuer,omitempty"` + Claims *OpenIDClaimsApplyConfiguration `json:"claims,omitempty"` +} + +// OpenIDIdentityProviderApplyConfiguration constructs a declarative configuration of the OpenIDIdentityProvider type for use with +// apply. +func OpenIDIdentityProvider() *OpenIDIdentityProviderApplyConfiguration { + return &OpenIDIdentityProviderApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. 
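Note that WithGroups takes the typed configv1.OpenIDClaim while the other claim lists are plain strings, so callers convert explicitly. A short illustration with arbitrary claim names that also shows the append semantics of repeated calls:

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// exampleClaims maps the usual OIDC claim names; repeated calls append, so the
// two WithPreferredUsername calls below yield a two-entry fallback list.
func exampleClaims() *applyconfigv1.OpenIDClaimsApplyConfiguration {
	return applyconfigv1.OpenIDClaims().
		WithPreferredUsername("preferred_username").
		WithPreferredUsername("email"). // appended after the first entry
		WithName("name").
		WithEmail("email").
		WithGroups(configv1.OpenIDClaim("groups")) // typed claim, not a bare string
}
```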
+func (b *OpenIDIdentityProviderApplyConfiguration) WithClientID(value string) *OpenIDIdentityProviderApplyConfiguration { + b.ClientID = &value + return b +} + +// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientSecret field is set to the value of the last call. +func (b *OpenIDIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *OpenIDIdentityProviderApplyConfiguration { + b.ClientSecret = value + return b +} + +// WithCA sets the CA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CA field is set to the value of the last call. +func (b *OpenIDIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *OpenIDIdentityProviderApplyConfiguration { + b.CA = value + return b +} + +// WithExtraScopes adds the given value to the ExtraScopes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExtraScopes field. +func (b *OpenIDIdentityProviderApplyConfiguration) WithExtraScopes(values ...string) *OpenIDIdentityProviderApplyConfiguration { + for i := range values { + b.ExtraScopes = append(b.ExtraScopes, values[i]) + } + return b +} + +// WithExtraAuthorizeParameters puts the entries into the ExtraAuthorizeParameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ExtraAuthorizeParameters field, +// overwriting an existing map entries in ExtraAuthorizeParameters field with the same key. +func (b *OpenIDIdentityProviderApplyConfiguration) WithExtraAuthorizeParameters(entries map[string]string) *OpenIDIdentityProviderApplyConfiguration { + if b.ExtraAuthorizeParameters == nil && len(entries) > 0 { + b.ExtraAuthorizeParameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ExtraAuthorizeParameters[k] = v + } + return b +} + +// WithIssuer sets the Issuer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Issuer field is set to the value of the last call. +func (b *OpenIDIdentityProviderApplyConfiguration) WithIssuer(value string) *OpenIDIdentityProviderApplyConfiguration { + b.Issuer = &value + return b +} + +// WithClaims sets the Claims field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claims field is set to the value of the last call. 
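Like the metadata maps earlier in this diff, WithExtraAuthorizeParameters merges entries across calls and overwrites duplicate keys. A sketch of a fully chained provider; the SecretNameReference builder is assumed from elsewhere in this package, and the issuer URL and parameter values are placeholders:

```go
package example

import (
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// exampleOpenIDProvider chains every field of the OpenID builder once; all
// concrete values are illustrative.
func exampleOpenIDProvider() *applyconfigv1.OpenIDIdentityProviderApplyConfiguration {
	return applyconfigv1.OpenIDIdentityProvider().
		WithClientID("openshift").
		WithClientSecret(applyconfigv1.SecretNameReference().WithName("oidc-client-secret")).
		WithIssuer("https://id.example.com").
		WithExtraScopes("email", "profile").
		WithExtraAuthorizeParameters(map[string]string{"prompt": "login"}). // merged, last key wins
		WithClaims(applyconfigv1.OpenIDClaims().WithEmail("email"))
}
```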
+func (b *OpenIDIdentityProviderApplyConfiguration) WithClaims(value *OpenIDClaimsApplyConfiguration) *OpenIDIdentityProviderApplyConfiguration { + b.Claims = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go new file mode 100644 index 0000000000000..f65d682d57ace --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// OpenStackPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the OpenStackPlatformLoadBalancer type for use +// with apply. +type OpenStackPlatformLoadBalancerApplyConfiguration struct { + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OpenStackPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the OpenStackPlatformLoadBalancer type for use with +// apply. +func OpenStackPlatformLoadBalancer() *OpenStackPlatformLoadBalancerApplyConfiguration { + return &OpenStackPlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *OpenStackPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *OpenStackPlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go new file mode 100644 index 0000000000000..af43c83306909 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// OpenStackPlatformSpecApplyConfiguration represents a declarative configuration of the OpenStackPlatformSpec type for use +// with apply. +type OpenStackPlatformSpecApplyConfiguration struct { + APIServerInternalIPs []configv1.IP `json:"apiServerInternalIPs,omitempty"` + IngressIPs []configv1.IP `json:"ingressIPs,omitempty"` + MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` +} + +// OpenStackPlatformSpecApplyConfiguration constructs a declarative configuration of the OpenStackPlatformSpec type for use with +// apply. +func OpenStackPlatformSpec() *OpenStackPlatformSpecApplyConfiguration { + return &OpenStackPlatformSpecApplyConfiguration{} +} + +// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field. 
+func (b *OpenStackPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...configv1.IP) *OpenStackPlatformSpecApplyConfiguration { + for i := range values { + b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i]) + } + return b +} + +// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IngressIPs field. +func (b *OpenStackPlatformSpecApplyConfiguration) WithIngressIPs(values ...configv1.IP) *OpenStackPlatformSpecApplyConfiguration { + for i := range values { + b.IngressIPs = append(b.IngressIPs, values[i]) + } + return b +} + +// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MachineNetworks field. +func (b *OpenStackPlatformSpecApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *OpenStackPlatformSpecApplyConfiguration { + for i := range values { + b.MachineNetworks = append(b.MachineNetworks, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go new file mode 100644 index 0000000000000..f06c78e24336e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go @@ -0,0 +1,96 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// OpenStackPlatformStatusApplyConfiguration represents a declarative configuration of the OpenStackPlatformStatus type for use +// with apply. +type OpenStackPlatformStatusApplyConfiguration struct { + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + CloudName *string `json:"cloudName,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + LoadBalancer *OpenStackPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` +} + +// OpenStackPlatformStatusApplyConfiguration constructs a declarative configuration of the OpenStackPlatformStatus type for use with +// apply. +func OpenStackPlatformStatus() *OpenStackPlatformStatusApplyConfiguration { + return &OpenStackPlatformStatusApplyConfiguration{} +} + +// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerInternalIP field is set to the value of the last call. 
+func (b *OpenStackPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *OpenStackPlatformStatusApplyConfiguration { + b.APIServerInternalIP = &value + return b +} + +// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field. +func (b *OpenStackPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *OpenStackPlatformStatusApplyConfiguration { + for i := range values { + b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i]) + } + return b +} + +// WithCloudName sets the CloudName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudName field is set to the value of the last call. +func (b *OpenStackPlatformStatusApplyConfiguration) WithCloudName(value string) *OpenStackPlatformStatusApplyConfiguration { + b.CloudName = &value + return b +} + +// WithIngressIP sets the IngressIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IngressIP field is set to the value of the last call. +func (b *OpenStackPlatformStatusApplyConfiguration) WithIngressIP(value string) *OpenStackPlatformStatusApplyConfiguration { + b.IngressIP = &value + return b +} + +// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IngressIPs field. +func (b *OpenStackPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *OpenStackPlatformStatusApplyConfiguration { + for i := range values { + b.IngressIPs = append(b.IngressIPs, values[i]) + } + return b +} + +// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeDNSIP field is set to the value of the last call. +func (b *OpenStackPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *OpenStackPlatformStatusApplyConfiguration { + b.NodeDNSIP = &value + return b +} + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *OpenStackPlatformStatusApplyConfiguration) WithLoadBalancer(value *OpenStackPlatformLoadBalancerApplyConfiguration) *OpenStackPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} + +// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MachineNetworks field. 
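The OpenStack status builder keeps both the legacy singular fields (APIServerInternalIP, IngressIP) and their plural dual-stack replacements, and MachineNetworks takes the typed configv1.CIDR. A sketch with placeholder addresses; the configv1.LoadBalancerTypeOpenShiftManagedDefault constant is assumed from openshift/api:

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// exampleOpenStackStatus populates both generations of the IP fields; all
// addresses and the cloud name are illustrative.
func exampleOpenStackStatus() *applyconfigv1.OpenStackPlatformStatusApplyConfiguration {
	return applyconfigv1.OpenStackPlatformStatus().
		WithCloudName("openstack").
		WithAPIServerInternalIP("10.0.0.5"). // legacy singular field
		WithAPIServerInternalIPs("10.0.0.5", "fd00::5"). // dual-stack plural field
		WithIngressIPs("10.0.0.7").
		WithLoadBalancer(applyconfigv1.OpenStackPlatformLoadBalancer().
			WithType(configv1.LoadBalancerTypeOpenShiftManagedDefault)).
		WithMachineNetworks(configv1.CIDR("10.0.0.0/16")) // typed CIDR, not a bare string
}
```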
+func (b *OpenStackPlatformStatusApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *OpenStackPlatformStatusApplyConfiguration { + for i := range values { + b.MachineNetworks = append(b.MachineNetworks, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go new file mode 100644 index 0000000000000..6c4336d6eb562 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OperandVersionApplyConfiguration represents a declarative configuration of the OperandVersion type for use +// with apply. +type OperandVersionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Version *string `json:"version,omitempty"` +} + +// OperandVersionApplyConfiguration constructs a declarative configuration of the OperandVersion type for use with +// apply. +func OperandVersion() *OperandVersionApplyConfiguration { + return &OperandVersionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OperandVersionApplyConfiguration) WithName(value string) *OperandVersionApplyConfiguration { + b.Name = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OperandVersionApplyConfiguration) WithVersion(value string) *OperandVersionApplyConfiguration { + b.Version = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go new file mode 100644 index 0000000000000..df95eb84d9c52 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OperatorHubApplyConfiguration represents a declarative configuration of the OperatorHub type for use +// with apply. +type OperatorHubApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OperatorHubSpecApplyConfiguration `json:"spec,omitempty"` + Status *OperatorHubStatusApplyConfiguration `json:"status,omitempty"` +} + +// OperatorHub constructs a declarative configuration of the OperatorHub type for use with +// apply. 
+func OperatorHub(name string) *OperatorHubApplyConfiguration {
+	b := &OperatorHubApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("OperatorHub")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractOperatorHub extracts the applied configuration owned by fieldManager from
+// operatorHub. If no managedFields are found in operatorHub for fieldManager, an
+// OperatorHubApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// operatorHub must be an unmodified OperatorHub API object that was retrieved from the Kubernetes API.
+// ExtractOperatorHub provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOperatorHub(operatorHub *configv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) {
+	return extractOperatorHub(operatorHub, fieldManager, "")
+}
+
+// ExtractOperatorHubStatus is the same as ExtractOperatorHub except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOperatorHubStatus(operatorHub *configv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) {
+	return extractOperatorHub(operatorHub, fieldManager, "status")
+}
+
+func extractOperatorHub(operatorHub *configv1.OperatorHub, fieldManager string, subresource string) (*OperatorHubApplyConfiguration, error) {
+	b := &OperatorHubApplyConfiguration{}
+	err := managedfields.ExtractInto(operatorHub, internal.Parser().Type("com.github.openshift.api.config.v1.OperatorHub"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(operatorHub.Name)
+
+	b.WithKind("OperatorHub")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithKind(value string) *OperatorHubApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithAPIVersion(value string) *OperatorHubApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithName(value string) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OperatorHubApplyConfiguration) WithGenerateName(value string) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OperatorHubApplyConfiguration) WithNamespace(value string) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OperatorHubApplyConfiguration) WithUID(value types.UID) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OperatorHubApplyConfiguration) WithResourceVersion(value string) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OperatorHubApplyConfiguration) WithGeneration(value int64) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OperatorHubApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OperatorHubApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OperatorHubApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OperatorHubApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *OperatorHubApplyConfiguration) WithLabels(entries map[string]string) *OperatorHubApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *OperatorHubApplyConfiguration) WithAnnotations(entries map[string]string) *OperatorHubApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *OperatorHubApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OperatorHubApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *OperatorHubApplyConfiguration) WithFinalizers(values ...string) *OperatorHubApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *OperatorHubApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithSpec(value *OperatorHubSpecApplyConfiguration) *OperatorHubApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithStatus(value *OperatorHubStatusApplyConfiguration) *OperatorHubApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *OperatorHubApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go
new file mode 100644
index 0000000000000..56179c4cf9dad
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OperatorHubSpecApplyConfiguration represents a declarative configuration of the OperatorHubSpec type for use
+// with apply.
+type OperatorHubSpecApplyConfiguration struct {
+	DisableAllDefaultSources *bool                         `json:"disableAllDefaultSources,omitempty"`
+	Sources                  []HubSourceApplyConfiguration `json:"sources,omitempty"`
+}
+
+// OperatorHubSpecApplyConfiguration constructs a declarative configuration of the OperatorHubSpec type for use with
+// apply.
+func OperatorHubSpec() *OperatorHubSpecApplyConfiguration {
+	return &OperatorHubSpecApplyConfiguration{}
+}
+
+// WithDisableAllDefaultSources sets the DisableAllDefaultSources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DisableAllDefaultSources field is set to the value of the last call.
+func (b *OperatorHubSpecApplyConfiguration) WithDisableAllDefaultSources(value bool) *OperatorHubSpecApplyConfiguration {
+	b.DisableAllDefaultSources = &value
+	return b
+}
+
+// WithSources adds the given value to the Sources field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Sources field.
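+//
+// Illustrative sketch (not generated output): Sources takes pointer-valued
+// builders and panics on nil entries, so construct each entry inline. It assumes
+// the HubSource builder generated elsewhere in this package exposes WithName and
+// WithDisabled; the source name below is hypothetical.
+//
+//	spec := OperatorHubSpec().
+//		WithDisableAllDefaultSources(true).
+//		WithSources(HubSource().WithName("community-operators").WithDisabled(false))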
+func (b *OperatorHubSpecApplyConfiguration) WithSources(values ...*HubSourceApplyConfiguration) *OperatorHubSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithSources")
+		}
+		b.Sources = append(b.Sources, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go
new file mode 100644
index 0000000000000..7e7cda1ac6e35
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OperatorHubStatusApplyConfiguration represents a declarative configuration of the OperatorHubStatus type for use
+// with apply.
+type OperatorHubStatusApplyConfiguration struct {
+	Sources []HubSourceStatusApplyConfiguration `json:"sources,omitempty"`
+}
+
+// OperatorHubStatusApplyConfiguration constructs a declarative configuration of the OperatorHubStatus type for use with
+// apply.
+func OperatorHubStatus() *OperatorHubStatusApplyConfiguration {
+	return &OperatorHubStatusApplyConfiguration{}
+}
+
+// WithSources adds the given value to the Sources field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Sources field.
+func (b *OperatorHubStatusApplyConfiguration) WithSources(values ...*HubSourceStatusApplyConfiguration) *OperatorHubStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithSources")
+		}
+		b.Sources = append(b.Sources, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go
new file mode 100644
index 0000000000000..e81d48044dc23
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// OvirtPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the OvirtPlatformLoadBalancer type for use
+// with apply.
+type OvirtPlatformLoadBalancerApplyConfiguration struct {
+	Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// OvirtPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the OvirtPlatformLoadBalancer type for use with
+// apply.
+func OvirtPlatformLoadBalancer() *OvirtPlatformLoadBalancerApplyConfiguration {
+	return &OvirtPlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *OvirtPlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *OvirtPlatformLoadBalancerApplyConfiguration {
+	b.Type = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go
new file mode 100644
index 0000000000000..18ad5d8492019
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go
@@ -0,0 +1,72 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OvirtPlatformStatusApplyConfiguration represents a declarative configuration of the OvirtPlatformStatus type for use
+// with apply.
+type OvirtPlatformStatusApplyConfiguration struct {
+	APIServerInternalIP  *string                                      `json:"apiServerInternalIP,omitempty"`
+	APIServerInternalIPs []string                                     `json:"apiServerInternalIPs,omitempty"`
+	IngressIP            *string                                      `json:"ingressIP,omitempty"`
+	IngressIPs           []string                                     `json:"ingressIPs,omitempty"`
+	NodeDNSIP            *string                                      `json:"nodeDNSIP,omitempty"`
+	LoadBalancer         *OvirtPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+}
+
+// OvirtPlatformStatusApplyConfiguration constructs a declarative configuration of the OvirtPlatformStatus type for use with
+// apply.
+func OvirtPlatformStatus() *OvirtPlatformStatusApplyConfiguration {
+	return &OvirtPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *OvirtPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *OvirtPlatformStatusApplyConfiguration {
+	b.APIServerInternalIP = &value
+	return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *OvirtPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *OvirtPlatformStatusApplyConfiguration {
+	for i := range values {
+		b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+	}
+	return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *OvirtPlatformStatusApplyConfiguration) WithIngressIP(value string) *OvirtPlatformStatusApplyConfiguration {
+	b.IngressIP = &value
+	return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *OvirtPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *OvirtPlatformStatusApplyConfiguration { + for i := range values { + b.IngressIPs = append(b.IngressIPs, values[i]) + } + return b +} + +// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeDNSIP field is set to the value of the last call. +func (b *OvirtPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *OvirtPlatformStatusApplyConfiguration { + b.NodeDNSIP = &value + return b +} + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *OvirtPlatformStatusApplyConfiguration) WithLoadBalancer(value *OvirtPlatformLoadBalancerApplyConfiguration) *OvirtPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go new file mode 100644 index 0000000000000..517ac0bfc6128 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go @@ -0,0 +1,153 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// PlatformSpecApplyConfiguration represents a declarative configuration of the PlatformSpec type for use +// with apply. +type PlatformSpecApplyConfiguration struct { + Type *configv1.PlatformType `json:"type,omitempty"` + AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` + Azure *configv1.AzurePlatformSpec `json:"azure,omitempty"` + GCP *configv1.GCPPlatformSpec `json:"gcp,omitempty"` + BareMetal *BareMetalPlatformSpecApplyConfiguration `json:"baremetal,omitempty"` + OpenStack *OpenStackPlatformSpecApplyConfiguration `json:"openstack,omitempty"` + Ovirt *configv1.OvirtPlatformSpec `json:"ovirt,omitempty"` + VSphere *VSpherePlatformSpecApplyConfiguration `json:"vsphere,omitempty"` + IBMCloud *configv1.IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + Kubevirt *configv1.KubevirtPlatformSpec `json:"kubevirt,omitempty"` + EquinixMetal *configv1.EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` + PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` + AlibabaCloud *configv1.AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` + Nutanix *NutanixPlatformSpecApplyConfiguration `json:"nutanix,omitempty"` + External *ExternalPlatformSpecApplyConfiguration `json:"external,omitempty"` +} + +// PlatformSpecApplyConfiguration constructs a declarative configuration of the PlatformSpec type for use with +// apply. +func PlatformSpec() *PlatformSpecApplyConfiguration { + return &PlatformSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *PlatformSpecApplyConfiguration) WithType(value configv1.PlatformType) *PlatformSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithAWS(value *AWSPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.AWS = value + return b +} + +// WithAzure sets the Azure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Azure field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithAzure(value configv1.AzurePlatformSpec) *PlatformSpecApplyConfiguration { + b.Azure = &value + return b +} + +// WithGCP sets the GCP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GCP field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithGCP(value configv1.GCPPlatformSpec) *PlatformSpecApplyConfiguration { + b.GCP = &value + return b +} + +// WithBareMetal sets the BareMetal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BareMetal field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithBareMetal(value *BareMetalPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.BareMetal = value + return b +} + +// WithOpenStack sets the OpenStack field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenStack field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithOpenStack(value *OpenStackPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.OpenStack = value + return b +} + +// WithOvirt sets the Ovirt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ovirt field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithOvirt(value configv1.OvirtPlatformSpec) *PlatformSpecApplyConfiguration { + b.Ovirt = &value + return b +} + +// WithVSphere sets the VSphere field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VSphere field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithVSphere(value *VSpherePlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.VSphere = value + return b +} + +// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IBMCloud field is set to the value of the last call. 
+func (b *PlatformSpecApplyConfiguration) WithIBMCloud(value configv1.IBMCloudPlatformSpec) *PlatformSpecApplyConfiguration { + b.IBMCloud = &value + return b +} + +// WithKubevirt sets the Kubevirt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kubevirt field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithKubevirt(value configv1.KubevirtPlatformSpec) *PlatformSpecApplyConfiguration { + b.Kubevirt = &value + return b +} + +// WithEquinixMetal sets the EquinixMetal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EquinixMetal field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithEquinixMetal(value configv1.EquinixMetalPlatformSpec) *PlatformSpecApplyConfiguration { + b.EquinixMetal = &value + return b +} + +// WithPowerVS sets the PowerVS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PowerVS field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithPowerVS(value *PowerVSPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.PowerVS = value + return b +} + +// WithAlibabaCloud sets the AlibabaCloud field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlibabaCloud field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithAlibabaCloud(value configv1.AlibabaCloudPlatformSpec) *PlatformSpecApplyConfiguration { + b.AlibabaCloud = &value + return b +} + +// WithNutanix sets the Nutanix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Nutanix field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithNutanix(value *NutanixPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.Nutanix = value + return b +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithExternal(value *ExternalPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.External = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go new file mode 100644 index 0000000000000..e470ebd96af5f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go @@ -0,0 +1,153 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// PlatformStatusApplyConfiguration represents a declarative configuration of the PlatformStatus type for use +// with apply. 
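+//
+// Illustrative sketch (not generated output): Type acts as a discriminator, and
+// only the matching platform-specific field is normally populated alongside it.
+// The values below are hypothetical.
+//
+//	status := PlatformStatus().
+//		WithType(configv1.OpenStackPlatformType).
+//		WithOpenStack(OpenStackPlatformStatus().WithCloudName("openstack"))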
+type PlatformStatusApplyConfiguration struct { + Type *configv1.PlatformType `json:"type,omitempty"` + AWS *AWSPlatformStatusApplyConfiguration `json:"aws,omitempty"` + Azure *AzurePlatformStatusApplyConfiguration `json:"azure,omitempty"` + GCP *GCPPlatformStatusApplyConfiguration `json:"gcp,omitempty"` + BareMetal *BareMetalPlatformStatusApplyConfiguration `json:"baremetal,omitempty"` + OpenStack *OpenStackPlatformStatusApplyConfiguration `json:"openstack,omitempty"` + Ovirt *OvirtPlatformStatusApplyConfiguration `json:"ovirt,omitempty"` + VSphere *VSpherePlatformStatusApplyConfiguration `json:"vsphere,omitempty"` + IBMCloud *IBMCloudPlatformStatusApplyConfiguration `json:"ibmcloud,omitempty"` + Kubevirt *KubevirtPlatformStatusApplyConfiguration `json:"kubevirt,omitempty"` + EquinixMetal *EquinixMetalPlatformStatusApplyConfiguration `json:"equinixMetal,omitempty"` + PowerVS *PowerVSPlatformStatusApplyConfiguration `json:"powervs,omitempty"` + AlibabaCloud *AlibabaCloudPlatformStatusApplyConfiguration `json:"alibabaCloud,omitempty"` + Nutanix *NutanixPlatformStatusApplyConfiguration `json:"nutanix,omitempty"` + External *ExternalPlatformStatusApplyConfiguration `json:"external,omitempty"` +} + +// PlatformStatusApplyConfiguration constructs a declarative configuration of the PlatformStatus type for use with +// apply. +func PlatformStatus() *PlatformStatusApplyConfiguration { + return &PlatformStatusApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithType(value configv1.PlatformType) *PlatformStatusApplyConfiguration { + b.Type = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithAWS(value *AWSPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.AWS = value + return b +} + +// WithAzure sets the Azure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Azure field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithAzure(value *AzurePlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.Azure = value + return b +} + +// WithGCP sets the GCP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GCP field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithGCP(value *GCPPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.GCP = value + return b +} + +// WithBareMetal sets the BareMetal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BareMetal field is set to the value of the last call. 
+func (b *PlatformStatusApplyConfiguration) WithBareMetal(value *BareMetalPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.BareMetal = value + return b +} + +// WithOpenStack sets the OpenStack field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenStack field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithOpenStack(value *OpenStackPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.OpenStack = value + return b +} + +// WithOvirt sets the Ovirt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ovirt field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithOvirt(value *OvirtPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.Ovirt = value + return b +} + +// WithVSphere sets the VSphere field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VSphere field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithVSphere(value *VSpherePlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.VSphere = value + return b +} + +// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IBMCloud field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithIBMCloud(value *IBMCloudPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.IBMCloud = value + return b +} + +// WithKubevirt sets the Kubevirt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kubevirt field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithKubevirt(value *KubevirtPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.Kubevirt = value + return b +} + +// WithEquinixMetal sets the EquinixMetal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EquinixMetal field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithEquinixMetal(value *EquinixMetalPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { + b.EquinixMetal = value + return b +} + +// WithPowerVS sets the PowerVS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PowerVS field is set to the value of the last call. 
+func (b *PlatformStatusApplyConfiguration) WithPowerVS(value *PowerVSPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+	b.PowerVS = value
+	return b
+}
+
+// WithAlibabaCloud sets the AlibabaCloud field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AlibabaCloud field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithAlibabaCloud(value *AlibabaCloudPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+	b.AlibabaCloud = value
+	return b
+}
+
+// WithNutanix sets the Nutanix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Nutanix field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithNutanix(value *NutanixPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+	b.Nutanix = value
+	return b
+}
+
+// WithExternal sets the External field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the External field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithExternal(value *ExternalPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+	b.External = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go
new file mode 100644
index 0000000000000..db3c3d1d932cd
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PowerVSPlatformSpecApplyConfiguration represents a declarative configuration of the PowerVSPlatformSpec type for use
+// with apply.
+type PowerVSPlatformSpecApplyConfiguration struct {
+	ServiceEndpoints []PowerVSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+}
+
+// PowerVSPlatformSpecApplyConfiguration constructs a declarative configuration of the PowerVSPlatformSpec type for use with
+// apply.
+func PowerVSPlatformSpec() *PowerVSPlatformSpecApplyConfiguration {
+	return &PowerVSPlatformSpecApplyConfiguration{}
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
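+//
+// Illustrative sketch (not generated output): endpoint entries are built with
+// the PowerVSServiceEndpoint builder defined later in this package; the name
+// and URL values below are hypothetical.
+//
+//	spec := PowerVSPlatformSpec().WithServiceEndpoints(
+//		PowerVSServiceEndpoint().WithName("iam").WithURL("https://iam.example.invalid"),
+//	)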
+func (b *PowerVSPlatformSpecApplyConfiguration) WithServiceEndpoints(values ...*PowerVSServiceEndpointApplyConfiguration) *PowerVSPlatformSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithServiceEndpoints")
+		}
+		b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go
new file mode 100644
index 0000000000000..f40099f16f830
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go
@@ -0,0 +1,73 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PowerVSPlatformStatusApplyConfiguration represents a declarative configuration of the PowerVSPlatformStatus type for use
+// with apply.
+type PowerVSPlatformStatusApplyConfiguration struct {
+	Region           *string                                    `json:"region,omitempty"`
+	Zone             *string                                    `json:"zone,omitempty"`
+	ResourceGroup    *string                                    `json:"resourceGroup,omitempty"`
+	ServiceEndpoints []PowerVSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+	CISInstanceCRN   *string                                    `json:"cisInstanceCRN,omitempty"`
+	DNSInstanceCRN   *string                                    `json:"dnsInstanceCRN,omitempty"`
+}
+
+// PowerVSPlatformStatusApplyConfiguration constructs a declarative configuration of the PowerVSPlatformStatus type for use with
+// apply.
+func PowerVSPlatformStatus() *PowerVSPlatformStatusApplyConfiguration {
+	return &PowerVSPlatformStatusApplyConfiguration{}
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithRegion(value string) *PowerVSPlatformStatusApplyConfiguration {
+	b.Region = &value
+	return b
+}
+
+// WithZone sets the Zone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Zone field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithZone(value string) *PowerVSPlatformStatusApplyConfiguration {
+	b.Zone = &value
+	return b
+}
+
+// WithResourceGroup sets the ResourceGroup field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroup field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithResourceGroup(value string) *PowerVSPlatformStatusApplyConfiguration {
+	b.ResourceGroup = &value
+	return b
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*PowerVSServiceEndpointApplyConfiguration) *PowerVSPlatformStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithServiceEndpoints") + } + b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i]) + } + return b +} + +// WithCISInstanceCRN sets the CISInstanceCRN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CISInstanceCRN field is set to the value of the last call. +func (b *PowerVSPlatformStatusApplyConfiguration) WithCISInstanceCRN(value string) *PowerVSPlatformStatusApplyConfiguration { + b.CISInstanceCRN = &value + return b +} + +// WithDNSInstanceCRN sets the DNSInstanceCRN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSInstanceCRN field is set to the value of the last call. +func (b *PowerVSPlatformStatusApplyConfiguration) WithDNSInstanceCRN(value string) *PowerVSPlatformStatusApplyConfiguration { + b.DNSInstanceCRN = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go new file mode 100644 index 0000000000000..8fd231a2ab9ff --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PowerVSServiceEndpointApplyConfiguration represents a declarative configuration of the PowerVSServiceEndpoint type for use +// with apply. +type PowerVSServiceEndpointApplyConfiguration struct { + Name *string `json:"name,omitempty"` + URL *string `json:"url,omitempty"` +} + +// PowerVSServiceEndpointApplyConfiguration constructs a declarative configuration of the PowerVSServiceEndpoint type for use with +// apply. +func PowerVSServiceEndpoint() *PowerVSServiceEndpointApplyConfiguration { + return &PowerVSServiceEndpointApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PowerVSServiceEndpointApplyConfiguration) WithName(value string) *PowerVSServiceEndpointApplyConfiguration { + b.Name = &value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. 
+func (b *PowerVSServiceEndpointApplyConfiguration) WithURL(value string) *PowerVSServiceEndpointApplyConfiguration { + b.URL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go new file mode 100644 index 0000000000000..2455204339727 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PrefixedClaimMappingApplyConfiguration represents a declarative configuration of the PrefixedClaimMapping type for use +// with apply. +type PrefixedClaimMappingApplyConfiguration struct { + TokenClaimMappingApplyConfiguration `json:",inline"` + Prefix *string `json:"prefix,omitempty"` +} + +// PrefixedClaimMappingApplyConfiguration constructs a declarative configuration of the PrefixedClaimMapping type for use with +// apply. +func PrefixedClaimMapping() *PrefixedClaimMappingApplyConfiguration { + return &PrefixedClaimMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *PrefixedClaimMappingApplyConfiguration) WithClaim(value string) *PrefixedClaimMappingApplyConfiguration { + b.TokenClaimMappingApplyConfiguration.Claim = &value + return b +} + +// WithPrefix sets the Prefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Prefix field is set to the value of the last call. +func (b *PrefixedClaimMappingApplyConfiguration) WithPrefix(value string) *PrefixedClaimMappingApplyConfiguration { + b.Prefix = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go new file mode 100644 index 0000000000000..c2392bab98101 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ProfileCustomizationsApplyConfiguration represents a declarative configuration of the ProfileCustomizations type for use +// with apply. +type ProfileCustomizationsApplyConfiguration struct { + DynamicResourceAllocation *configv1.DRAEnablement `json:"dynamicResourceAllocation,omitempty"` +} + +// ProfileCustomizationsApplyConfiguration constructs a declarative configuration of the ProfileCustomizations type for use with +// apply. +func ProfileCustomizations() *ProfileCustomizationsApplyConfiguration { + return &ProfileCustomizationsApplyConfiguration{} +} + +// WithDynamicResourceAllocation sets the DynamicResourceAllocation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DynamicResourceAllocation field is set to the value of the last call. 
+func (b *ProfileCustomizationsApplyConfiguration) WithDynamicResourceAllocation(value configv1.DRAEnablement) *ProfileCustomizationsApplyConfiguration {
+	b.DynamicResourceAllocation = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go
new file mode 100644
index 0000000000000..5c040bae4d587
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ProjectApplyConfiguration represents a declarative configuration of the Project type for use
+// with apply.
+type ProjectApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ProjectSpecApplyConfiguration `json:"spec,omitempty"`
+	Status                               *configv1.ProjectStatus        `json:"status,omitempty"`
+}
+
+// Project constructs a declarative configuration of the Project type for use with
+// apply.
+func Project(name string) *ProjectApplyConfiguration {
+	b := &ProjectApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Project")
+	b.WithAPIVersion("config.openshift.io/v1")
+	return b
+}
+
+// ExtractProject extracts the applied configuration owned by fieldManager from
+// project. If no managedFields are found in project for fieldManager, a
+// ProjectApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// project must be an unmodified Project API object that was retrieved from the Kubernetes API.
+// ExtractProject provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractProject(project *configv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) {
+	return extractProject(project, fieldManager, "")
+}
+
+// ExtractProjectStatus is the same as ExtractProject except
+// that it extracts the status subresource applied configuration.
+// Experimental!
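+//
+// A hedged sketch of the extract/modify-in-place/apply round trip (illustrative,
+// not generated output): the "client", "ctx", and "proj" variables and the
+// manager name are assumptions; apismetav1 is k8s.io/apimachinery/pkg/apis/meta/v1.
+//
+//	cfg, err := ExtractProject(proj, "example-manager")
+//	if err != nil {
+//		return err
+//	}
+//	cfg.WithSpec(ProjectSpec().WithProjectRequestMessage("Contact your team lead to request a project."))
+//	_, err = client.ConfigV1().Projects().Apply(ctx, cfg, apismetav1.ApplyOptions{FieldManager: "example-manager"})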
+func ExtractProjectStatus(project *configv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) { + return extractProject(project, fieldManager, "status") +} + +func extractProject(project *configv1.Project, fieldManager string, subresource string) (*ProjectApplyConfiguration, error) { + b := &ProjectApplyConfiguration{} + err := managedfields.ExtractInto(project, internal.Parser().Type("com.github.openshift.api.config.v1.Project"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(project.Name) + + b.WithKind("Project") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ProjectApplyConfiguration) WithKind(value string) *ProjectApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ProjectApplyConfiguration) WithAPIVersion(value string) *ProjectApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ProjectApplyConfiguration) WithName(value string) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ProjectApplyConfiguration) WithGenerateName(value string) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ProjectApplyConfiguration) WithNamespace(value string) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ProjectApplyConfiguration) WithUID(value types.UID) *ProjectApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithResourceVersion(value string) *ProjectApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithGeneration(value int64) *ProjectApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ProjectApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ProjectApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProjectApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
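+//
+// Illustrative sketch (not generated output): entries merge per key rather than
+// replacing the whole map, so repeated calls accumulate. The label keys below
+// are hypothetical.
+//
+//	p := Project("example").
+//		WithLabels(map[string]string{"team": "infra"}).
+//		WithLabels(map[string]string{"tier": "prod"}) // Labels now holds both keys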
+func (b *ProjectApplyConfiguration) WithLabels(entries map[string]string) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ProjectApplyConfiguration) WithAnnotations(entries map[string]string) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ProjectApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ProjectApplyConfiguration) WithFinalizers(values ...string) *ProjectApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ProjectApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ProjectApplyConfiguration) WithSpec(value *ProjectSpecApplyConfiguration) *ProjectApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ProjectApplyConfiguration) WithStatus(value configv1.ProjectStatus) *ProjectApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ProjectApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go new file mode 100644 index 0000000000000..417be90be423b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProjectSpecApplyConfiguration represents a declarative configuration of the ProjectSpec type for use +// with apply. +type ProjectSpecApplyConfiguration struct { + ProjectRequestMessage *string `json:"projectRequestMessage,omitempty"` + ProjectRequestTemplate *TemplateReferenceApplyConfiguration `json:"projectRequestTemplate,omitempty"` +} + +// ProjectSpecApplyConfiguration constructs a declarative configuration of the ProjectSpec type for use with +// apply. +func ProjectSpec() *ProjectSpecApplyConfiguration { + return &ProjectSpecApplyConfiguration{} +} + +// WithProjectRequestMessage sets the ProjectRequestMessage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectRequestMessage field is set to the value of the last call. +func (b *ProjectSpecApplyConfiguration) WithProjectRequestMessage(value string) *ProjectSpecApplyConfiguration { + b.ProjectRequestMessage = &value + return b +} + +// WithProjectRequestTemplate sets the ProjectRequestTemplate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectRequestTemplate field is set to the value of the last call. +func (b *ProjectSpecApplyConfiguration) WithProjectRequestTemplate(value *TemplateReferenceApplyConfiguration) *ProjectSpecApplyConfiguration { + b.ProjectRequestTemplate = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go new file mode 100644 index 0000000000000..e3f40e4f9ee6a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PromQLClusterConditionApplyConfiguration represents a declarative configuration of the PromQLClusterCondition type for use +// with apply. +type PromQLClusterConditionApplyConfiguration struct { + PromQL *string `json:"promql,omitempty"` +} + +// PromQLClusterConditionApplyConfiguration constructs a declarative configuration of the PromQLClusterCondition type for use with +// apply. 
+func PromQLClusterCondition() *PromQLClusterConditionApplyConfiguration { + return &PromQLClusterConditionApplyConfiguration{} +} + +// WithPromQL sets the PromQL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PromQL field is set to the value of the last call. +func (b *PromQLClusterConditionApplyConfiguration) WithPromQL(value string) *PromQLClusterConditionApplyConfiguration { + b.PromQL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go new file mode 100644 index 0000000000000..7184cbd0827c6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ProxyApplyConfiguration represents a declarative configuration of the Proxy type for use +// with apply. +type ProxyApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ProxySpecApplyConfiguration `json:"spec,omitempty"` + Status *ProxyStatusApplyConfiguration `json:"status,omitempty"` +} + +// Proxy constructs a declarative configuration of the Proxy type for use with +// apply. +func Proxy(name string) *ProxyApplyConfiguration { + b := &ProxyApplyConfiguration{} + b.WithName(name) + b.WithKind("Proxy") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractProxy extracts the applied configuration owned by fieldManager from +// proxy. If no managedFields are found in proxy for fieldManager, a +// ProxyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// proxy must be an unmodified Proxy API object that was retrieved from the Kubernetes API. +// ExtractProxy provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractProxy(proxy *configv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) { + return extractProxy(proxy, fieldManager, "") +} + +// ExtractProxyStatus is the same as ExtractProxy except +// that it extracts the status subresource applied configuration. +// Experimental!
+func ExtractProxyStatus(proxy *configv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) { + return extractProxy(proxy, fieldManager, "status") +} + +func extractProxy(proxy *configv1.Proxy, fieldManager string, subresource string) (*ProxyApplyConfiguration, error) { + b := &ProxyApplyConfiguration{} + err := managedfields.ExtractInto(proxy, internal.Parser().Type("com.github.openshift.api.config.v1.Proxy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(proxy.Name) + + b.WithKind("Proxy") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithKind(value string) *ProxyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithAPIVersion(value string) *ProxyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithName(value string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithGenerateName(value string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithNamespace(value string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ProxyApplyConfiguration) WithUID(value types.UID) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithResourceVersion(value string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithGeneration(value int64) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting any existing map entries in the Labels field with the same key.
+func (b *ProxyApplyConfiguration) WithLabels(entries map[string]string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting any existing map entries in the Annotations field with the same key. +func (b *ProxyApplyConfiguration) WithAnnotations(entries map[string]string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ProxyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ProxyApplyConfiguration) WithFinalizers(values ...string) *ProxyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ProxyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ProxyApplyConfiguration) WithSpec(value *ProxySpecApplyConfiguration) *ProxyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithStatus(value *ProxyStatusApplyConfiguration) *ProxyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ProxyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go new file mode 100644 index 0000000000000..bd2cf66570db3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go @@ -0,0 +1,61 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProxySpecApplyConfiguration represents a declarative configuration of the ProxySpec type for use +// with apply. +type ProxySpecApplyConfiguration struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy *string `json:"noProxy,omitempty"` + ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` + TrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"trustedCA,omitempty"` +} + +// ProxySpecApplyConfiguration constructs a declarative configuration of the ProxySpec type for use with +// apply. +func ProxySpec() *ProxySpecApplyConfiguration { + return &ProxySpecApplyConfiguration{} +} + +// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPProxy field is set to the value of the last call. +func (b *ProxySpecApplyConfiguration) WithHTTPProxy(value string) *ProxySpecApplyConfiguration { + b.HTTPProxy = &value + return b +} + +// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSProxy field is set to the value of the last call. +func (b *ProxySpecApplyConfiguration) WithHTTPSProxy(value string) *ProxySpecApplyConfiguration { + b.HTTPSProxy = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *ProxySpecApplyConfiguration) WithNoProxy(value string) *ProxySpecApplyConfiguration { + b.NoProxy = &value + return b +} + +// WithReadinessEndpoints adds the given value to the ReadinessEndpoints field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ReadinessEndpoints field. +func (b *ProxySpecApplyConfiguration) WithReadinessEndpoints(values ...string) *ProxySpecApplyConfiguration { + for i := range values { + b.ReadinessEndpoints = append(b.ReadinessEndpoints, values[i]) + } + return b +} + +// WithTrustedCA sets the TrustedCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TrustedCA field is set to the value of the last call. +func (b *ProxySpecApplyConfiguration) WithTrustedCA(value *ConfigMapNameReferenceApplyConfiguration) *ProxySpecApplyConfiguration { + b.TrustedCA = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go new file mode 100644 index 0000000000000..784afdff6f7d4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProxyStatusApplyConfiguration represents a declarative configuration of the ProxyStatus type for use +// with apply. +type ProxyStatusApplyConfiguration struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy *string `json:"noProxy,omitempty"` +} + +// ProxyStatusApplyConfiguration constructs a declarative configuration of the ProxyStatus type for use with +// apply. +func ProxyStatus() *ProxyStatusApplyConfiguration { + return &ProxyStatusApplyConfiguration{} +} + +// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPProxy field is set to the value of the last call. +func (b *ProxyStatusApplyConfiguration) WithHTTPProxy(value string) *ProxyStatusApplyConfiguration { + b.HTTPProxy = &value + return b +} + +// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSProxy field is set to the value of the last call. +func (b *ProxyStatusApplyConfiguration) WithHTTPSProxy(value string) *ProxyStatusApplyConfiguration { + b.HTTPSProxy = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *ProxyStatusApplyConfiguration) WithNoProxy(value string) *ProxyStatusApplyConfiguration { + b.NoProxy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go new file mode 100644 index 0000000000000..d4aaa4e1e8d84 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RegistryLocationApplyConfiguration represents a declarative configuration of the RegistryLocation type for use +// with apply. +type RegistryLocationApplyConfiguration struct { + DomainName *string `json:"domainName,omitempty"` + Insecure *bool `json:"insecure,omitempty"` +} + +// RegistryLocationApplyConfiguration constructs a declarative configuration of the RegistryLocation type for use with +// apply. 
+func RegistryLocation() *RegistryLocationApplyConfiguration { + return &RegistryLocationApplyConfiguration{} +} + +// WithDomainName sets the DomainName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DomainName field is set to the value of the last call. +func (b *RegistryLocationApplyConfiguration) WithDomainName(value string) *RegistryLocationApplyConfiguration { + b.DomainName = &value + return b +} + +// WithInsecure sets the Insecure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Insecure field is set to the value of the last call. +func (b *RegistryLocationApplyConfiguration) WithInsecure(value bool) *RegistryLocationApplyConfiguration { + b.Insecure = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go new file mode 100644 index 0000000000000..a92592f304552 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go @@ -0,0 +1,58 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RegistrySourcesApplyConfiguration represents a declarative configuration of the RegistrySources type for use +// with apply. +type RegistrySourcesApplyConfiguration struct { + InsecureRegistries []string `json:"insecureRegistries,omitempty"` + BlockedRegistries []string `json:"blockedRegistries,omitempty"` + AllowedRegistries []string `json:"allowedRegistries,omitempty"` + ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"` +} + +// RegistrySourcesApplyConfiguration constructs a declarative configuration of the RegistrySources type for use with +// apply. +func RegistrySources() *RegistrySourcesApplyConfiguration { + return &RegistrySourcesApplyConfiguration{} +} + +// WithInsecureRegistries adds the given value to the InsecureRegistries field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the InsecureRegistries field. +func (b *RegistrySourcesApplyConfiguration) WithInsecureRegistries(values ...string) *RegistrySourcesApplyConfiguration { + for i := range values { + b.InsecureRegistries = append(b.InsecureRegistries, values[i]) + } + return b +} + +// WithBlockedRegistries adds the given value to the BlockedRegistries field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the BlockedRegistries field. +func (b *RegistrySourcesApplyConfiguration) WithBlockedRegistries(values ...string) *RegistrySourcesApplyConfiguration { + for i := range values { + b.BlockedRegistries = append(b.BlockedRegistries, values[i]) + } + return b +} + +// WithAllowedRegistries adds the given value to the AllowedRegistries field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedRegistries field. +func (b *RegistrySourcesApplyConfiguration) WithAllowedRegistries(values ...string) *RegistrySourcesApplyConfiguration { + for i := range values { + b.AllowedRegistries = append(b.AllowedRegistries, values[i]) + } + return b +} + +// WithContainerRuntimeSearchRegistries adds the given value to the ContainerRuntimeSearchRegistries field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ContainerRuntimeSearchRegistries field. +func (b *RegistrySourcesApplyConfiguration) WithContainerRuntimeSearchRegistries(values ...string) *RegistrySourcesApplyConfiguration { + for i := range values { + b.ContainerRuntimeSearchRegistries = append(b.ContainerRuntimeSearchRegistries, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go new file mode 100644 index 0000000000000..c8275fcde24d8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go @@ -0,0 +1,65 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ReleaseApplyConfiguration represents a declarative configuration of the Release type for use +// with apply. +type ReleaseApplyConfiguration struct { + Architecture *configv1.ClusterVersionArchitecture `json:"architecture,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + URL *configv1.URL `json:"url,omitempty"` + Channels []string `json:"channels,omitempty"` +} + +// ReleaseApplyConfiguration constructs a declarative configuration of the Release type for use with +// apply. +func Release() *ReleaseApplyConfiguration { + return &ReleaseApplyConfiguration{} +} + +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. +func (b *ReleaseApplyConfiguration) WithArchitecture(value configv1.ClusterVersionArchitecture) *ReleaseApplyConfiguration { + b.Architecture = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ReleaseApplyConfiguration) WithVersion(value string) *ReleaseApplyConfiguration { + b.Version = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *ReleaseApplyConfiguration) WithImage(value string) *ReleaseApplyConfiguration { + b.Image = &value + return b +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call. +func (b *ReleaseApplyConfiguration) WithURL(value configv1.URL) *ReleaseApplyConfiguration { + b.URL = &value + return b +} + +// WithChannels adds the given value to the Channels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Channels field. +func (b *ReleaseApplyConfiguration) WithChannels(values ...string) *ReleaseApplyConfiguration { + for i := range values { + b.Channels = append(b.Channels, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go new file mode 100644 index 0000000000000..96f7240951abe --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// RepositoryDigestMirrorsApplyConfiguration represents a declarative configuration of the RepositoryDigestMirrors type for use +// with apply. +type RepositoryDigestMirrorsApplyConfiguration struct { + Source *string `json:"source,omitempty"` + AllowMirrorByTags *bool `json:"allowMirrorByTags,omitempty"` + Mirrors []configv1.Mirror `json:"mirrors,omitempty"` +} + +// RepositoryDigestMirrorsApplyConfiguration constructs a declarative configuration of the RepositoryDigestMirrors type for use with +// apply. +func RepositoryDigestMirrors() *RepositoryDigestMirrorsApplyConfiguration { + return &RepositoryDigestMirrorsApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *RepositoryDigestMirrorsApplyConfiguration) WithSource(value string) *RepositoryDigestMirrorsApplyConfiguration { + b.Source = &value + return b +} + +// WithAllowMirrorByTags sets the AllowMirrorByTags field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowMirrorByTags field is set to the value of the last call. +func (b *RepositoryDigestMirrorsApplyConfiguration) WithAllowMirrorByTags(value bool) *RepositoryDigestMirrorsApplyConfiguration { + b.AllowMirrorByTags = &value + return b +} + +// WithMirrors adds the given value to the Mirrors field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Mirrors field.
+func (b *RepositoryDigestMirrorsApplyConfiguration) WithMirrors(values ...configv1.Mirror) *RepositoryDigestMirrorsApplyConfiguration { + for i := range values { + b.Mirrors = append(b.Mirrors, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go new file mode 100644 index 0000000000000..2911473d024bc --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go @@ -0,0 +1,96 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RequestHeaderIdentityProviderApplyConfiguration represents a declarative configuration of the RequestHeaderIdentityProvider type for use +// with apply. +type RequestHeaderIdentityProviderApplyConfiguration struct { + LoginURL *string `json:"loginURL,omitempty"` + ChallengeURL *string `json:"challengeURL,omitempty"` + ClientCA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` + ClientCommonNames []string `json:"clientCommonNames,omitempty"` + Headers []string `json:"headers,omitempty"` + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders,omitempty"` + NameHeaders []string `json:"nameHeaders,omitempty"` + EmailHeaders []string `json:"emailHeaders,omitempty"` +} + +// RequestHeaderIdentityProviderApplyConfiguration constructs a declarative configuration of the RequestHeaderIdentityProvider type for use with +// apply. +func RequestHeaderIdentityProvider() *RequestHeaderIdentityProviderApplyConfiguration { + return &RequestHeaderIdentityProviderApplyConfiguration{} +} + +// WithLoginURL sets the LoginURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoginURL field is set to the value of the last call. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithLoginURL(value string) *RequestHeaderIdentityProviderApplyConfiguration { + b.LoginURL = &value + return b +} + +// WithChallengeURL sets the ChallengeURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ChallengeURL field is set to the value of the last call. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithChallengeURL(value string) *RequestHeaderIdentityProviderApplyConfiguration { + b.ChallengeURL = &value + return b +} + +// WithClientCA sets the ClientCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientCA field is set to the value of the last call. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithClientCA(value *ConfigMapNameReferenceApplyConfiguration) *RequestHeaderIdentityProviderApplyConfiguration { + b.ClientCA = value + return b +} + +// WithClientCommonNames adds the given value to the ClientCommonNames field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ClientCommonNames field.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithClientCommonNames(values ...string) *RequestHeaderIdentityProviderApplyConfiguration { + for i := range values { + b.ClientCommonNames = append(b.ClientCommonNames, values[i]) + } + return b +} + +// WithHeaders adds the given value to the Headers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Headers field. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration { + for i := range values { + b.Headers = append(b.Headers, values[i]) + } + return b +} + +// WithPreferredUsernameHeaders adds the given value to the PreferredUsernameHeaders field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PreferredUsernameHeaders field. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithPreferredUsernameHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration { + for i := range values { + b.PreferredUsernameHeaders = append(b.PreferredUsernameHeaders, values[i]) + } + return b +} + +// WithNameHeaders adds the given value to the NameHeaders field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NameHeaders field. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithNameHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration { + for i := range values { + b.NameHeaders = append(b.NameHeaders, values[i]) + } + return b +} + +// WithEmailHeaders adds the given value to the EmailHeaders field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the EmailHeaders field. +func (b *RequestHeaderIdentityProviderApplyConfiguration) WithEmailHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration { + for i := range values { + b.EmailHeaders = append(b.EmailHeaders, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go new file mode 100644 index 0000000000000..c68466123a223 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go @@ -0,0 +1,66 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RequiredHSTSPolicyApplyConfiguration represents a declarative configuration of the RequiredHSTSPolicy type for use +// with apply.
+type RequiredHSTSPolicyApplyConfiguration struct { + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + DomainPatterns []string `json:"domainPatterns,omitempty"` + MaxAge *MaxAgePolicyApplyConfiguration `json:"maxAge,omitempty"` + PreloadPolicy *configv1.PreloadPolicy `json:"preloadPolicy,omitempty"` + IncludeSubDomainsPolicy *configv1.IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` +} + +// RequiredHSTSPolicyApplyConfiguration constructs a declarative configuration of the RequiredHSTSPolicy type for use with +// apply. +func RequiredHSTSPolicy() *RequiredHSTSPolicyApplyConfiguration { + return &RequiredHSTSPolicyApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *RequiredHSTSPolicyApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithDomainPatterns adds the given value to the DomainPatterns field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DomainPatterns field. +func (b *RequiredHSTSPolicyApplyConfiguration) WithDomainPatterns(values ...string) *RequiredHSTSPolicyApplyConfiguration { + for i := range values { + b.DomainPatterns = append(b.DomainPatterns, values[i]) + } + return b +} + +// WithMaxAge sets the MaxAge field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxAge field is set to the value of the last call. +func (b *RequiredHSTSPolicyApplyConfiguration) WithMaxAge(value *MaxAgePolicyApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration { + b.MaxAge = value + return b +} + +// WithPreloadPolicy sets the PreloadPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreloadPolicy field is set to the value of the last call. +func (b *RequiredHSTSPolicyApplyConfiguration) WithPreloadPolicy(value configv1.PreloadPolicy) *RequiredHSTSPolicyApplyConfiguration { + b.PreloadPolicy = &value + return b +} + +// WithIncludeSubDomainsPolicy sets the IncludeSubDomainsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IncludeSubDomainsPolicy field is set to the value of the last call.
+func (b *RequiredHSTSPolicyApplyConfiguration) WithIncludeSubDomainsPolicy(value configv1.IncludeSubDomainsPolicy) *RequiredHSTSPolicyApplyConfiguration { + b.IncludeSubDomainsPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go new file mode 100644 index 0000000000000..fa2323d72419c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// SchedulerApplyConfiguration represents a declarative configuration of the Scheduler type for use +// with apply. +type SchedulerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *SchedulerSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1.SchedulerStatus `json:"status,omitempty"` +} + +// Scheduler constructs a declarative configuration of the Scheduler type for use with +// apply. +func Scheduler(name string) *SchedulerApplyConfiguration { + b := &SchedulerApplyConfiguration{} + b.WithName(name) + b.WithKind("Scheduler") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractScheduler extracts the applied configuration owned by fieldManager from +// scheduler. If no managedFields are found in scheduler for fieldManager, a +// SchedulerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// scheduler must be an unmodified Scheduler API object that was retrieved from the Kubernetes API. +// ExtractScheduler provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractScheduler(scheduler *configv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) { + return extractScheduler(scheduler, fieldManager, "") +} + +// ExtractSchedulerStatus is the same as ExtractScheduler except +// that it extracts the status subresource applied configuration. +// Experimental!
+func ExtractSchedulerStatus(scheduler *configv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) { + return extractScheduler(scheduler, fieldManager, "status") +} + +func extractScheduler(scheduler *configv1.Scheduler, fieldManager string, subresource string) (*SchedulerApplyConfiguration, error) { + b := &SchedulerApplyConfiguration{} + err := managedfields.ExtractInto(scheduler, internal.Parser().Type("com.github.openshift.api.config.v1.Scheduler"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(scheduler.Name) + + b.WithKind("Scheduler") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithKind(value string) *SchedulerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithAPIVersion(value string) *SchedulerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithName(value string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithGenerateName(value string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithNamespace(value string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *SchedulerApplyConfiguration) WithUID(value types.UID) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithResourceVersion(value string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithGeneration(value int64) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting any existing map entries in the Labels field with the same key.
+func (b *SchedulerApplyConfiguration) WithLabels(entries map[string]string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting any existing map entries in the Annotations field with the same key. +func (b *SchedulerApplyConfiguration) WithAnnotations(entries map[string]string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *SchedulerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *SchedulerApplyConfiguration) WithFinalizers(values ...string) *SchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *SchedulerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *SchedulerApplyConfiguration) WithSpec(value *SchedulerSpecApplyConfiguration) *SchedulerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithStatus(value configv1.SchedulerStatus) *SchedulerApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *SchedulerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go new file mode 100644 index 0000000000000..2160ab2ff5316 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// SchedulerSpecApplyConfiguration represents a declarative configuration of the SchedulerSpec type for use +// with apply. +type SchedulerSpecApplyConfiguration struct { + Policy *ConfigMapNameReferenceApplyConfiguration `json:"policy,omitempty"` + Profile *configv1.SchedulerProfile `json:"profile,omitempty"` + ProfileCustomizations *ProfileCustomizationsApplyConfiguration `json:"profileCustomizations,omitempty"` + DefaultNodeSelector *string `json:"defaultNodeSelector,omitempty"` + MastersSchedulable *bool `json:"mastersSchedulable,omitempty"` +} + +// SchedulerSpecApplyConfiguration constructs a declarative configuration of the SchedulerSpec type for use with +// apply. +func SchedulerSpec() *SchedulerSpecApplyConfiguration { + return &SchedulerSpecApplyConfiguration{} +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. +func (b *SchedulerSpecApplyConfiguration) WithPolicy(value *ConfigMapNameReferenceApplyConfiguration) *SchedulerSpecApplyConfiguration { + b.Policy = value + return b +} + +// WithProfile sets the Profile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Profile field is set to the value of the last call. +func (b *SchedulerSpecApplyConfiguration) WithProfile(value configv1.SchedulerProfile) *SchedulerSpecApplyConfiguration { + b.Profile = &value + return b +} + +// WithProfileCustomizations sets the ProfileCustomizations field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProfileCustomizations field is set to the value of the last call. +func (b *SchedulerSpecApplyConfiguration) WithProfileCustomizations(value *ProfileCustomizationsApplyConfiguration) *SchedulerSpecApplyConfiguration { + b.ProfileCustomizations = value + return b +} + +// WithDefaultNodeSelector sets the DefaultNodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultNodeSelector field is set to the value of the last call. 
+func (b *SchedulerSpecApplyConfiguration) WithDefaultNodeSelector(value string) *SchedulerSpecApplyConfiguration { + b.DefaultNodeSelector = &value + return b +} + +// WithMastersSchedulable sets the MastersSchedulable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MastersSchedulable field is set to the value of the last call. +func (b *SchedulerSpecApplyConfiguration) WithMastersSchedulable(value bool) *SchedulerSpecApplyConfiguration { + b.MastersSchedulable = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go new file mode 100644 index 0000000000000..692056c6b8794 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SecretNameReferenceApplyConfiguration represents a declarative configuration of the SecretNameReference type for use +// with apply. +type SecretNameReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// SecretNameReferenceApplyConfiguration constructs a declarative configuration of the SecretNameReference type for use with +// apply. +func SecretNameReference() *SecretNameReferenceApplyConfiguration { + return &SecretNameReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *SecretNameReferenceApplyConfiguration) WithName(value string) *SecretNameReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go new file mode 100644 index 0000000000000..918f13df6a709 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SignatureStoreApplyConfiguration represents a declarative configuration of the SignatureStore type for use +// with apply. +type SignatureStoreApplyConfiguration struct { + URL *string `json:"url,omitempty"` + CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` +} + +// SignatureStoreApplyConfiguration constructs a declarative configuration of the SignatureStore type for use with +// apply. +func SignatureStore() *SignatureStoreApplyConfiguration { + return &SignatureStoreApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. 
+func (b *SignatureStoreApplyConfiguration) WithURL(value string) *SignatureStoreApplyConfiguration {
+	b.URL = &value
+	return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *SignatureStoreApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *SignatureStoreApplyConfiguration {
+	b.CA = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go
new file mode 100644
index 0000000000000..30112046a0dbc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TemplateReferenceApplyConfiguration represents a declarative configuration of the TemplateReference type for use
+// with apply.
+type TemplateReferenceApplyConfiguration struct {
+	Name *string `json:"name,omitempty"`
+}
+
+// TemplateReferenceApplyConfiguration constructs a declarative configuration of the TemplateReference type for use with
+// apply.
+func TemplateReference() *TemplateReferenceApplyConfiguration {
+	return &TemplateReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *TemplateReferenceApplyConfiguration) WithName(value string) *TemplateReferenceApplyConfiguration {
+	b.Name = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go
new file mode 100644
index 0000000000000..43590d0ef3643
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// TLSProfileSpecApplyConfiguration represents a declarative configuration of the TLSProfileSpec type for use
+// with apply.
+type TLSProfileSpecApplyConfiguration struct {
+	Ciphers       []string                     `json:"ciphers,omitempty"`
+	MinTLSVersion *configv1.TLSProtocolVersion `json:"minTLSVersion,omitempty"`
+}
+
+// TLSProfileSpecApplyConfiguration constructs a declarative configuration of the TLSProfileSpec type for use with
+// apply.
+func TLSProfileSpec() *TLSProfileSpecApplyConfiguration {
+	return &TLSProfileSpecApplyConfiguration{}
+}
+
+// WithCiphers adds the given value to the Ciphers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Ciphers field.
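+//
+// A small sketch of the append semantics (cipher names are illustrative
+// values only):
+//
+//	spec := TLSProfileSpec().
+//		WithCiphers("ECDHE-ECDSA-AES128-GCM-SHA256").
+//		WithCiphers("ECDHE-RSA-AES128-GCM-SHA256")
+//	// spec.Ciphers now holds both entries in call order; unlike the
+//	// "With" setters for scalar fields, repeated calls append rather
+//	// than overwrite.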
+func (b *TLSProfileSpecApplyConfiguration) WithCiphers(values ...string) *TLSProfileSpecApplyConfiguration { + for i := range values { + b.Ciphers = append(b.Ciphers, values[i]) + } + return b +} + +// WithMinTLSVersion sets the MinTLSVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinTLSVersion field is set to the value of the last call. +func (b *TLSProfileSpecApplyConfiguration) WithMinTLSVersion(value configv1.TLSProtocolVersion) *TLSProfileSpecApplyConfiguration { + b.MinTLSVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go new file mode 100644 index 0000000000000..e5806e33c4dd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// TLSSecurityProfileApplyConfiguration represents a declarative configuration of the TLSSecurityProfile type for use +// with apply. +type TLSSecurityProfileApplyConfiguration struct { + Type *configv1.TLSProfileType `json:"type,omitempty"` + Old *configv1.OldTLSProfile `json:"old,omitempty"` + Intermediate *configv1.IntermediateTLSProfile `json:"intermediate,omitempty"` + Modern *configv1.ModernTLSProfile `json:"modern,omitempty"` + Custom *CustomTLSProfileApplyConfiguration `json:"custom,omitempty"` +} + +// TLSSecurityProfileApplyConfiguration constructs a declarative configuration of the TLSSecurityProfile type for use with +// apply. +func TLSSecurityProfile() *TLSSecurityProfileApplyConfiguration { + return &TLSSecurityProfileApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TLSSecurityProfileApplyConfiguration) WithType(value configv1.TLSProfileType) *TLSSecurityProfileApplyConfiguration { + b.Type = &value + return b +} + +// WithOld sets the Old field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Old field is set to the value of the last call. +func (b *TLSSecurityProfileApplyConfiguration) WithOld(value configv1.OldTLSProfile) *TLSSecurityProfileApplyConfiguration { + b.Old = &value + return b +} + +// WithIntermediate sets the Intermediate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Intermediate field is set to the value of the last call. +func (b *TLSSecurityProfileApplyConfiguration) WithIntermediate(value configv1.IntermediateTLSProfile) *TLSSecurityProfileApplyConfiguration { + b.Intermediate = &value + return b +} + +// WithModern sets the Modern field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Modern field is set to the value of the last call. +func (b *TLSSecurityProfileApplyConfiguration) WithModern(value configv1.ModernTLSProfile) *TLSSecurityProfileApplyConfiguration { + b.Modern = &value + return b +} + +// WithCustom sets the Custom field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Custom field is set to the value of the last call. +func (b *TLSSecurityProfileApplyConfiguration) WithCustom(value *CustomTLSProfileApplyConfiguration) *TLSSecurityProfileApplyConfiguration { + b.Custom = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go new file mode 100644 index 0000000000000..dbd509f06820a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TokenClaimMappingApplyConfiguration represents a declarative configuration of the TokenClaimMapping type for use +// with apply. +type TokenClaimMappingApplyConfiguration struct { + Claim *string `json:"claim,omitempty"` +} + +// TokenClaimMappingApplyConfiguration constructs a declarative configuration of the TokenClaimMapping type for use with +// apply. +func TokenClaimMapping() *TokenClaimMappingApplyConfiguration { + return &TokenClaimMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *TokenClaimMappingApplyConfiguration) WithClaim(value string) *TokenClaimMappingApplyConfiguration { + b.Claim = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go new file mode 100644 index 0000000000000..9b3b0bb56127d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TokenClaimMappingsApplyConfiguration represents a declarative configuration of the TokenClaimMappings type for use +// with apply. +type TokenClaimMappingsApplyConfiguration struct { + Username *UsernameClaimMappingApplyConfiguration `json:"username,omitempty"` + Groups *PrefixedClaimMappingApplyConfiguration `json:"groups,omitempty"` +} + +// TokenClaimMappingsApplyConfiguration constructs a declarative configuration of the TokenClaimMappings type for use with +// apply. +func TokenClaimMappings() *TokenClaimMappingsApplyConfiguration { + return &TokenClaimMappingsApplyConfiguration{} +} + +// WithUsername sets the Username field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Username field is set to the value of the last call. 
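+//
+// A sketch of composing nested claim mappings; the claim names are
+// illustrative, and the PrefixedClaimMapping builder is assumed from its
+// sibling generated file:
+//
+//	m := TokenClaimMappings().
+//		WithUsername(UsernameClaimMapping().WithClaim("email")).
+//		WithGroups(PrefixedClaimMapping().WithClaim("groups"))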
+func (b *TokenClaimMappingsApplyConfiguration) WithUsername(value *UsernameClaimMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration { + b.Username = value + return b +} + +// WithGroups sets the Groups field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Groups field is set to the value of the last call. +func (b *TokenClaimMappingsApplyConfiguration) WithGroups(value *PrefixedClaimMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration { + b.Groups = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go new file mode 100644 index 0000000000000..74e9f61091f2b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// TokenClaimValidationRuleApplyConfiguration represents a declarative configuration of the TokenClaimValidationRule type for use +// with apply. +type TokenClaimValidationRuleApplyConfiguration struct { + Type *configv1.TokenValidationRuleType `json:"type,omitempty"` + RequiredClaim *TokenRequiredClaimApplyConfiguration `json:"requiredClaim,omitempty"` +} + +// TokenClaimValidationRuleApplyConfiguration constructs a declarative configuration of the TokenClaimValidationRule type for use with +// apply. +func TokenClaimValidationRule() *TokenClaimValidationRuleApplyConfiguration { + return &TokenClaimValidationRuleApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TokenClaimValidationRuleApplyConfiguration) WithType(value configv1.TokenValidationRuleType) *TokenClaimValidationRuleApplyConfiguration { + b.Type = &value + return b +} + +// WithRequiredClaim sets the RequiredClaim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RequiredClaim field is set to the value of the last call. +func (b *TokenClaimValidationRuleApplyConfiguration) WithRequiredClaim(value *TokenRequiredClaimApplyConfiguration) *TokenClaimValidationRuleApplyConfiguration { + b.RequiredClaim = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go new file mode 100644 index 0000000000000..e1b6c4b511d32 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TokenConfigApplyConfiguration represents a declarative configuration of the TokenConfig type for use +// with apply. 
+type TokenConfigApplyConfiguration struct { + AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty"` + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` + AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"` +} + +// TokenConfigApplyConfiguration constructs a declarative configuration of the TokenConfig type for use with +// apply. +func TokenConfig() *TokenConfigApplyConfiguration { + return &TokenConfigApplyConfiguration{} +} + +// WithAccessTokenMaxAgeSeconds sets the AccessTokenMaxAgeSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AccessTokenMaxAgeSeconds field is set to the value of the last call. +func (b *TokenConfigApplyConfiguration) WithAccessTokenMaxAgeSeconds(value int32) *TokenConfigApplyConfiguration { + b.AccessTokenMaxAgeSeconds = &value + return b +} + +// WithAccessTokenInactivityTimeoutSeconds sets the AccessTokenInactivityTimeoutSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AccessTokenInactivityTimeoutSeconds field is set to the value of the last call. +func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeoutSeconds(value int32) *TokenConfigApplyConfiguration { + b.AccessTokenInactivityTimeoutSeconds = &value + return b +} + +// WithAccessTokenInactivityTimeout sets the AccessTokenInactivityTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AccessTokenInactivityTimeout field is set to the value of the last call. +func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeout(value metav1.Duration) *TokenConfigApplyConfiguration { + b.AccessTokenInactivityTimeout = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go new file mode 100644 index 0000000000000..68f590abc6ede --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// TokenIssuerApplyConfiguration represents a declarative configuration of the TokenIssuer type for use +// with apply. +type TokenIssuerApplyConfiguration struct { + URL *string `json:"issuerURL,omitempty"` + Audiences []configv1.TokenAudience `json:"audiences,omitempty"` + CertificateAuthority *ConfigMapNameReferenceApplyConfiguration `json:"issuerCertificateAuthority,omitempty"` +} + +// TokenIssuerApplyConfiguration constructs a declarative configuration of the TokenIssuer type for use with +// apply. +func TokenIssuer() *TokenIssuerApplyConfiguration { + return &TokenIssuerApplyConfiguration{} +} + +// WithURL sets the URL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URL field is set to the value of the last call. 
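+//
+// A sketch of a fully populated issuer; the URL, audience, and ConfigMap
+// name are illustrative, and the ConfigMapNameReference builder is
+// assumed from its sibling generated file:
+//
+//	iss := TokenIssuer().
+//		WithURL("https://issuer.example.com").
+//		WithAudiences("openshift").
+//		WithCertificateAuthority(ConfigMapNameReference().WithName("issuer-ca"))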
+func (b *TokenIssuerApplyConfiguration) WithURL(value string) *TokenIssuerApplyConfiguration {
+	b.URL = &value
+	return b
+}
+
+// WithAudiences adds the given value to the Audiences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Audiences field.
+func (b *TokenIssuerApplyConfiguration) WithAudiences(values ...configv1.TokenAudience) *TokenIssuerApplyConfiguration {
+	for i := range values {
+		b.Audiences = append(b.Audiences, values[i])
+	}
+	return b
+}
+
+// WithCertificateAuthority sets the CertificateAuthority field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CertificateAuthority field is set to the value of the last call.
+func (b *TokenIssuerApplyConfiguration) WithCertificateAuthority(value *ConfigMapNameReferenceApplyConfiguration) *TokenIssuerApplyConfiguration {
+	b.CertificateAuthority = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go
new file mode 100644
index 0000000000000..6dec5b2a19517
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TokenRequiredClaimApplyConfiguration represents a declarative configuration of the TokenRequiredClaim type for use
+// with apply.
+type TokenRequiredClaimApplyConfiguration struct {
+	Claim         *string `json:"claim,omitempty"`
+	RequiredValue *string `json:"requiredValue,omitempty"`
+}
+
+// TokenRequiredClaimApplyConfiguration constructs a declarative configuration of the TokenRequiredClaim type for use with
+// apply.
+func TokenRequiredClaim() *TokenRequiredClaimApplyConfiguration {
+	return &TokenRequiredClaimApplyConfiguration{}
+}
+
+// WithClaim sets the Claim field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Claim field is set to the value of the last call.
+func (b *TokenRequiredClaimApplyConfiguration) WithClaim(value string) *TokenRequiredClaimApplyConfiguration {
+	b.Claim = &value
+	return b
+}
+
+// WithRequiredValue sets the RequiredValue field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequiredValue field is set to the value of the last call.
+func (b *TokenRequiredClaimApplyConfiguration) WithRequiredValue(value string) *TokenRequiredClaimApplyConfiguration {
+	b.RequiredValue = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
new file mode 100644
index 0000000000000..004d1bac22418
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
@@ -0,0 +1,54 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// UpdateApplyConfiguration represents a declarative configuration of the Update type for use +// with apply. +type UpdateApplyConfiguration struct { + Architecture *configv1.ClusterVersionArchitecture `json:"architecture,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + Force *bool `json:"force,omitempty"` +} + +// UpdateApplyConfiguration constructs a declarative configuration of the Update type for use with +// apply. +func Update() *UpdateApplyConfiguration { + return &UpdateApplyConfiguration{} +} + +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. +func (b *UpdateApplyConfiguration) WithArchitecture(value configv1.ClusterVersionArchitecture) *UpdateApplyConfiguration { + b.Architecture = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *UpdateApplyConfiguration) WithVersion(value string) *UpdateApplyConfiguration { + b.Version = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *UpdateApplyConfiguration) WithImage(value string) *UpdateApplyConfiguration { + b.Image = &value + return b +} + +// WithForce sets the Force field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Force field is set to the value of the last call. +func (b *UpdateApplyConfiguration) WithForce(value bool) *UpdateApplyConfiguration { + b.Force = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go new file mode 100644 index 0000000000000..b7998eb610ca7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go @@ -0,0 +1,82 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// UpdateHistoryApplyConfiguration represents a declarative configuration of the UpdateHistory type for use +// with apply. +type UpdateHistoryApplyConfiguration struct { + State *configv1.UpdateState `json:"state,omitempty"` + StartedTime *metav1.Time `json:"startedTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + Verified *bool `json:"verified,omitempty"` + AcceptedRisks *string `json:"acceptedRisks,omitempty"` +} + +// UpdateHistoryApplyConfiguration constructs a declarative configuration of the UpdateHistory type for use with +// apply. 
+func UpdateHistory() *UpdateHistoryApplyConfiguration { + return &UpdateHistoryApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *UpdateHistoryApplyConfiguration) WithState(value configv1.UpdateState) *UpdateHistoryApplyConfiguration { + b.State = &value + return b +} + +// WithStartedTime sets the StartedTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StartedTime field is set to the value of the last call. +func (b *UpdateHistoryApplyConfiguration) WithStartedTime(value metav1.Time) *UpdateHistoryApplyConfiguration { + b.StartedTime = &value + return b +} + +// WithCompletionTime sets the CompletionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CompletionTime field is set to the value of the last call. +func (b *UpdateHistoryApplyConfiguration) WithCompletionTime(value metav1.Time) *UpdateHistoryApplyConfiguration { + b.CompletionTime = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *UpdateHistoryApplyConfiguration) WithVersion(value string) *UpdateHistoryApplyConfiguration { + b.Version = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *UpdateHistoryApplyConfiguration) WithImage(value string) *UpdateHistoryApplyConfiguration { + b.Image = &value + return b +} + +// WithVerified sets the Verified field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Verified field is set to the value of the last call. +func (b *UpdateHistoryApplyConfiguration) WithVerified(value bool) *UpdateHistoryApplyConfiguration { + b.Verified = &value + return b +} + +// WithAcceptedRisks sets the AcceptedRisks field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AcceptedRisks field is set to the value of the last call. 
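+//
+// An illustrative sketch of recording one history entry; the version and
+// risk text are example values, and configv1.CompletedUpdate is assumed
+// from the openshift/api UpdateState constants:
+//
+//	h := UpdateHistory().
+//		WithState(configv1.CompletedUpdate).
+//		WithVersion("4.17.0").
+//		WithVerified(true).
+//		WithAcceptedRisks("UpgradeableFalse overridden by admin")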
+func (b *UpdateHistoryApplyConfiguration) WithAcceptedRisks(value string) *UpdateHistoryApplyConfiguration { + b.AcceptedRisks = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go new file mode 100644 index 0000000000000..e90a90117fc4e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// UsernameClaimMappingApplyConfiguration represents a declarative configuration of the UsernameClaimMapping type for use +// with apply. +type UsernameClaimMappingApplyConfiguration struct { + TokenClaimMappingApplyConfiguration `json:",inline"` + PrefixPolicy *configv1.UsernamePrefixPolicy `json:"prefixPolicy,omitempty"` + Prefix *UsernamePrefixApplyConfiguration `json:"prefix,omitempty"` +} + +// UsernameClaimMappingApplyConfiguration constructs a declarative configuration of the UsernameClaimMapping type for use with +// apply. +func UsernameClaimMapping() *UsernameClaimMappingApplyConfiguration { + return &UsernameClaimMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *UsernameClaimMappingApplyConfiguration) WithClaim(value string) *UsernameClaimMappingApplyConfiguration { + b.TokenClaimMappingApplyConfiguration.Claim = &value + return b +} + +// WithPrefixPolicy sets the PrefixPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrefixPolicy field is set to the value of the last call. +func (b *UsernameClaimMappingApplyConfiguration) WithPrefixPolicy(value configv1.UsernamePrefixPolicy) *UsernameClaimMappingApplyConfiguration { + b.PrefixPolicy = &value + return b +} + +// WithPrefix sets the Prefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Prefix field is set to the value of the last call. +func (b *UsernameClaimMappingApplyConfiguration) WithPrefix(value *UsernamePrefixApplyConfiguration) *UsernameClaimMappingApplyConfiguration { + b.Prefix = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go new file mode 100644 index 0000000000000..03720723bd374 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// UsernamePrefixApplyConfiguration represents a declarative configuration of the UsernamePrefix type for use +// with apply. 
+type UsernamePrefixApplyConfiguration struct { + PrefixString *string `json:"prefixString,omitempty"` +} + +// UsernamePrefixApplyConfiguration constructs a declarative configuration of the UsernamePrefix type for use with +// apply. +func UsernamePrefix() *UsernamePrefixApplyConfiguration { + return &UsernamePrefixApplyConfiguration{} +} + +// WithPrefixString sets the PrefixString field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrefixString field is set to the value of the last call. +func (b *UsernamePrefixApplyConfiguration) WithPrefixString(value string) *UsernamePrefixApplyConfiguration { + b.PrefixString = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go new file mode 100644 index 0000000000000..f590263a1f0c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSphereFailureDomainHostGroupApplyConfiguration represents a declarative configuration of the VSphereFailureDomainHostGroup type for use +// with apply. +type VSphereFailureDomainHostGroupApplyConfiguration struct { + VMGroup *string `json:"vmGroup,omitempty"` + HostGroup *string `json:"hostGroup,omitempty"` + VMHostRule *string `json:"vmHostRule,omitempty"` +} + +// VSphereFailureDomainHostGroupApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainHostGroup type for use with +// apply. +func VSphereFailureDomainHostGroup() *VSphereFailureDomainHostGroupApplyConfiguration { + return &VSphereFailureDomainHostGroupApplyConfiguration{} +} + +// WithVMGroup sets the VMGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VMGroup field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithVMGroup(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.VMGroup = &value + return b +} + +// WithHostGroup sets the HostGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostGroup field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithHostGroup(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.HostGroup = &value + return b +} + +// WithVMHostRule sets the VMHostRule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VMHostRule field is set to the value of the last call. 
+func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithVMHostRule(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.VMHostRule = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go new file mode 100644 index 0000000000000..bf923d829803e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VSphereFailureDomainRegionAffinityApplyConfiguration represents a declarative configuration of the VSphereFailureDomainRegionAffinity type for use +// with apply. +type VSphereFailureDomainRegionAffinityApplyConfiguration struct { + Type *configv1.VSphereFailureDomainRegionType `json:"type,omitempty"` +} + +// VSphereFailureDomainRegionAffinityApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainRegionAffinity type for use with +// apply. +func VSphereFailureDomainRegionAffinity() *VSphereFailureDomainRegionAffinityApplyConfiguration { + return &VSphereFailureDomainRegionAffinityApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *VSphereFailureDomainRegionAffinityApplyConfiguration) WithType(value configv1.VSphereFailureDomainRegionType) *VSphereFailureDomainRegionAffinityApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go new file mode 100644 index 0000000000000..5bbbe95560d32 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VSphereFailureDomainZoneAffinityApplyConfiguration represents a declarative configuration of the VSphereFailureDomainZoneAffinity type for use +// with apply. +type VSphereFailureDomainZoneAffinityApplyConfiguration struct { + Type *configv1.VSphereFailureDomainZoneType `json:"type,omitempty"` + HostGroup *VSphereFailureDomainHostGroupApplyConfiguration `json:"hostGroup,omitempty"` +} + +// VSphereFailureDomainZoneAffinityApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainZoneAffinity type for use with +// apply. +func VSphereFailureDomainZoneAffinity() *VSphereFailureDomainZoneAffinityApplyConfiguration { + return &VSphereFailureDomainZoneAffinityApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *VSphereFailureDomainZoneAffinityApplyConfiguration) WithType(value configv1.VSphereFailureDomainZoneType) *VSphereFailureDomainZoneAffinityApplyConfiguration { + b.Type = &value + return b +} + +// WithHostGroup sets the HostGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostGroup field is set to the value of the last call. +func (b *VSphereFailureDomainZoneAffinityApplyConfiguration) WithHostGroup(value *VSphereFailureDomainHostGroupApplyConfiguration) *VSphereFailureDomainZoneAffinityApplyConfiguration { + b.HostGroup = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go new file mode 100644 index 0000000000000..aeb2388825fda --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go @@ -0,0 +1,77 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformFailureDomainSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformFailureDomainSpec type for use +// with apply. +type VSpherePlatformFailureDomainSpecApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Region *string `json:"region,omitempty"` + Zone *string `json:"zone,omitempty"` + RegionAffinity *VSphereFailureDomainRegionAffinityApplyConfiguration `json:"regionAffinity,omitempty"` + ZoneAffinity *VSphereFailureDomainZoneAffinityApplyConfiguration `json:"zoneAffinity,omitempty"` + Server *string `json:"server,omitempty"` + Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"` +} + +// VSpherePlatformFailureDomainSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformFailureDomainSpec type for use with +// apply. +func VSpherePlatformFailureDomainSpec() *VSpherePlatformFailureDomainSpecApplyConfiguration { + return &VSpherePlatformFailureDomainSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithName(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Name = &value + return b +} + +// WithRegion sets the Region field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Region field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithRegion(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Region = &value + return b +} + +// WithZone sets the Zone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Zone field is set to the value of the last call. 
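+//
+// A sketch of a minimal failure domain; all names and the vCenter address
+// are illustrative values only:
+//
+//	fd := VSpherePlatformFailureDomainSpec().
+//		WithName("fd-east-1a").
+//		WithRegion("us-east").
+//		WithZone("us-east-1a").
+//		WithServer("vcenter.example.com")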
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZone(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Zone = &value + return b +} + +// WithRegionAffinity sets the RegionAffinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RegionAffinity field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithRegionAffinity(value *VSphereFailureDomainRegionAffinityApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.RegionAffinity = value + return b +} + +// WithZoneAffinity sets the ZoneAffinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ZoneAffinity field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZoneAffinity(value *VSphereFailureDomainZoneAffinityApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.ZoneAffinity = value + return b +} + +// WithServer sets the Server field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Server field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithServer(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Server = &value + return b +} + +// WithTopology sets the Topology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Topology field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithTopology(value *VSpherePlatformTopologyApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Topology = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go new file mode 100644 index 0000000000000..9eb2f57aabc23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VSpherePlatformLoadBalancerApplyConfiguration represents a declarative configuration of the VSpherePlatformLoadBalancer type for use +// with apply. +type VSpherePlatformLoadBalancerApplyConfiguration struct { + Type *configv1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// VSpherePlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the VSpherePlatformLoadBalancer type for use with +// apply. +func VSpherePlatformLoadBalancer() *VSpherePlatformLoadBalancerApplyConfiguration { + return &VSpherePlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Type field is set to the value of the last call. +func (b *VSpherePlatformLoadBalancerApplyConfiguration) WithType(value configv1.PlatformLoadBalancerType) *VSpherePlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go new file mode 100644 index 0000000000000..f83a0c50a7ad7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformNodeNetworkingApplyConfiguration represents a declarative configuration of the VSpherePlatformNodeNetworking type for use +// with apply. +type VSpherePlatformNodeNetworkingApplyConfiguration struct { + External *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"external,omitempty"` + Internal *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"internal,omitempty"` +} + +// VSpherePlatformNodeNetworkingApplyConfiguration constructs a declarative configuration of the VSpherePlatformNodeNetworking type for use with +// apply. +func VSpherePlatformNodeNetworking() *VSpherePlatformNodeNetworkingApplyConfiguration { + return &VSpherePlatformNodeNetworkingApplyConfiguration{} +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *VSpherePlatformNodeNetworkingApplyConfiguration) WithExternal(value *VSpherePlatformNodeNetworkingSpecApplyConfiguration) *VSpherePlatformNodeNetworkingApplyConfiguration { + b.External = value + return b +} + +// WithInternal sets the Internal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Internal field is set to the value of the last call. +func (b *VSpherePlatformNodeNetworkingApplyConfiguration) WithInternal(value *VSpherePlatformNodeNetworkingSpecApplyConfiguration) *VSpherePlatformNodeNetworkingApplyConfiguration { + b.Internal = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go new file mode 100644 index 0000000000000..670448d3c19a8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformNodeNetworkingSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use +// with apply. 
+type VSpherePlatformNodeNetworkingSpecApplyConfiguration struct {
+	NetworkSubnetCIDR        []string `json:"networkSubnetCidr,omitempty"`
+	Network                  *string  `json:"network,omitempty"`
+	ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"`
+}
+
+// VSpherePlatformNodeNetworkingSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use with
+// apply.
+func VSpherePlatformNodeNetworkingSpec() *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+	return &VSpherePlatformNodeNetworkingSpecApplyConfiguration{}
+}
+
+// WithNetworkSubnetCIDR adds the given value to the NetworkSubnetCIDR field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NetworkSubnetCIDR field.
+func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithNetworkSubnetCIDR(values ...string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+	for i := range values {
+		b.NetworkSubnetCIDR = append(b.NetworkSubnetCIDR, values[i])
+	}
+	return b
+}
+
+// WithNetwork sets the Network field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Network field is set to the value of the last call.
+func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithNetwork(value string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+	b.Network = &value
+	return b
+}
+
+// WithExcludeNetworkSubnetCIDR adds the given value to the ExcludeNetworkSubnetCIDR field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExcludeNetworkSubnetCIDR field.
+func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithExcludeNetworkSubnetCIDR(values ...string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+	for i := range values {
+		b.ExcludeNetworkSubnetCIDR = append(b.ExcludeNetworkSubnetCIDR, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
new file mode 100644
index 0000000000000..d0d191331eab5
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
@@ -0,0 +1,88 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// VSpherePlatformSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformSpec type for use
+// with apply.
+type VSpherePlatformSpecApplyConfiguration struct {
+	VCenters             []VSpherePlatformVCenterSpecApplyConfiguration       `json:"vcenters,omitempty"`
+	FailureDomains       []VSpherePlatformFailureDomainSpecApplyConfiguration `json:"failureDomains,omitempty"`
+	NodeNetworking       *VSpherePlatformNodeNetworkingApplyConfiguration     `json:"nodeNetworking,omitempty"`
+	APIServerInternalIPs []configv1.IP                                        `json:"apiServerInternalIPs,omitempty"`
+	IngressIPs           []configv1.IP                                        `json:"ingressIPs,omitempty"`
+	MachineNetworks      []configv1.CIDR                                      `json:"machineNetworks,omitempty"`
+}
+
+// VSpherePlatformSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformSpec type for use with
+// apply.
+func VSpherePlatformSpec() *VSpherePlatformSpecApplyConfiguration {
+	return &VSpherePlatformSpecApplyConfiguration{}
+}
+
+// WithVCenters adds the given value to the VCenters field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the VCenters field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithVCenters(values ...*VSpherePlatformVCenterSpecApplyConfiguration) *VSpherePlatformSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithVCenters")
+		}
+		b.VCenters = append(b.VCenters, *values[i])
+	}
+	return b
+}
+
+// WithFailureDomains adds the given value to the FailureDomains field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the FailureDomains field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithFailureDomains(values ...*VSpherePlatformFailureDomainSpecApplyConfiguration) *VSpherePlatformSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithFailureDomains")
+		}
+		b.FailureDomains = append(b.FailureDomains, *values[i])
+	}
+	return b
+}
+
+// WithNodeNetworking sets the NodeNetworking field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeNetworking field is set to the value of the last call.
+func (b *VSpherePlatformSpecApplyConfiguration) WithNodeNetworking(value *VSpherePlatformNodeNetworkingApplyConfiguration) *VSpherePlatformSpecApplyConfiguration {
+	b.NodeNetworking = value
+	return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...configv1.IP) *VSpherePlatformSpecApplyConfiguration {
+	for i := range values {
+		b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+	}
+	return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
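+//
+// A sketch combining pointer-valued and scalar variadic setters; note
+// that WithVCenters panics on a nil element. The vCenter address and IP
+// are illustrative, and the VSpherePlatformVCenterSpec builder is assumed
+// from its sibling generated file:
+//
+//	spec := VSpherePlatformSpec().
+//		WithVCenters(VSpherePlatformVCenterSpec().WithServer("vc.example.com")).
+//		WithIngressIPs("192.0.2.10")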
+func (b *VSpherePlatformSpecApplyConfiguration) WithIngressIPs(values ...configv1.IP) *VSpherePlatformSpecApplyConfiguration { + for i := range values { + b.IngressIPs = append(b.IngressIPs, values[i]) + } + return b +} + +// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MachineNetworks field. +func (b *VSpherePlatformSpecApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *VSpherePlatformSpecApplyConfiguration { + for i := range values { + b.MachineNetworks = append(b.MachineNetworks, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go new file mode 100644 index 0000000000000..36696df716d2b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go @@ -0,0 +1,87 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VSpherePlatformStatusApplyConfiguration represents a declarative configuration of the VSpherePlatformStatus type for use +// with apply. +type VSpherePlatformStatusApplyConfiguration struct { + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + LoadBalancer *VSpherePlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` +} + +// VSpherePlatformStatusApplyConfiguration constructs a declarative configuration of the VSpherePlatformStatus type for use with +// apply. +func VSpherePlatformStatus() *VSpherePlatformStatusApplyConfiguration { + return &VSpherePlatformStatusApplyConfiguration{} +} + +// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIServerInternalIP field is set to the value of the last call. +func (b *VSpherePlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *VSpherePlatformStatusApplyConfiguration { + b.APIServerInternalIP = &value + return b +} + +// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field. +func (b *VSpherePlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *VSpherePlatformStatusApplyConfiguration { + for i := range values { + b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i]) + } + return b +} + +// WithIngressIP sets the IngressIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call. +func (b *VSpherePlatformStatusApplyConfiguration) WithIngressIP(value string) *VSpherePlatformStatusApplyConfiguration { + b.IngressIP = &value + return b +} + +// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IngressIPs field. +func (b *VSpherePlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *VSpherePlatformStatusApplyConfiguration { + for i := range values { + b.IngressIPs = append(b.IngressIPs, values[i]) + } + return b +} + +// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeDNSIP field is set to the value of the last call. +func (b *VSpherePlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *VSpherePlatformStatusApplyConfiguration { + b.NodeDNSIP = &value + return b +} + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *VSpherePlatformStatusApplyConfiguration) WithLoadBalancer(value *VSpherePlatformLoadBalancerApplyConfiguration) *VSpherePlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} + +// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MachineNetworks field. +func (b *VSpherePlatformStatusApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *VSpherePlatformStatusApplyConfiguration { + for i := range values { + b.MachineNetworks = append(b.MachineNetworks, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go new file mode 100644 index 0000000000000..a3036a5cfea81 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go @@ -0,0 +1,79 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformTopologyApplyConfiguration represents a declarative configuration of the VSpherePlatformTopology type for use +// with apply. +type VSpherePlatformTopologyApplyConfiguration struct { + Datacenter *string `json:"datacenter,omitempty"` + ComputeCluster *string `json:"computeCluster,omitempty"` + Networks []string `json:"networks,omitempty"` + Datastore *string `json:"datastore,omitempty"` + ResourcePool *string `json:"resourcePool,omitempty"` + Folder *string `json:"folder,omitempty"` + Template *string `json:"template,omitempty"` +} + +// VSpherePlatformTopologyApplyConfiguration constructs a declarative configuration of the VSpherePlatformTopology type for use with +// apply.
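+// +// An illustrative chaining sketch; the vSphere inventory paths and network names below are placeholder assumptions: +// +//	topology := VSpherePlatformTopology(). +//		WithDatacenter("dc1"). +//		WithComputeCluster("/dc1/host/cluster-1"). +//		WithNetworks("segment-a", "segment-b"). +//		WithDatastore("/dc1/datastore/datastore-1")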
+func VSpherePlatformTopology() *VSpherePlatformTopologyApplyConfiguration { + return &VSpherePlatformTopologyApplyConfiguration{} +} + +// WithDatacenter sets the Datacenter field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Datacenter field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithDatacenter(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Datacenter = &value + return b +} + +// WithComputeCluster sets the ComputeCluster field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ComputeCluster field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithComputeCluster(value string) *VSpherePlatformTopologyApplyConfiguration { + b.ComputeCluster = &value + return b +} + +// WithNetworks adds the given value to the Networks field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Networks field. +func (b *VSpherePlatformTopologyApplyConfiguration) WithNetworks(values ...string) *VSpherePlatformTopologyApplyConfiguration { + for i := range values { + b.Networks = append(b.Networks, values[i]) + } + return b +} + +// WithDatastore sets the Datastore field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Datastore field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithDatastore(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Datastore = &value + return b +} + +// WithResourcePool sets the ResourcePool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourcePool field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithResourcePool(value string) *VSpherePlatformTopologyApplyConfiguration { + b.ResourcePool = &value + return b +} + +// WithFolder sets the Folder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Folder field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithFolder(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Folder = &value + return b +} + +// WithTemplate sets the Template field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Template field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithTemplate(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Template = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go new file mode 100644 index 0000000000000..ff652761863cf --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go @@ -0,0 +1,43 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformVCenterSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformVCenterSpec type for use +// with apply. +type VSpherePlatformVCenterSpecApplyConfiguration struct { + Server *string `json:"server,omitempty"` + Port *int32 `json:"port,omitempty"` + Datacenters []string `json:"datacenters,omitempty"` +} + +// VSpherePlatformVCenterSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformVCenterSpec type for use with +// apply. +func VSpherePlatformVCenterSpec() *VSpherePlatformVCenterSpecApplyConfiguration { + return &VSpherePlatformVCenterSpecApplyConfiguration{} +} + +// WithServer sets the Server field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Server field is set to the value of the last call. +func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithServer(value string) *VSpherePlatformVCenterSpecApplyConfiguration { + b.Server = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithPort(value int32) *VSpherePlatformVCenterSpecApplyConfiguration { + b.Port = &value + return b +} + +// WithDatacenters adds the given value to the Datacenters field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Datacenters field. +func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithDatacenters(values ...string) *VSpherePlatformVCenterSpecApplyConfiguration { + for i := range values { + b.Datacenters = append(b.Datacenters, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go new file mode 100644 index 0000000000000..4ed9e2d2d4907 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WebhookTokenAuthenticatorApplyConfiguration represents a declarative configuration of the WebhookTokenAuthenticator type for use +// with apply.
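+// +// An illustrative sketch; it assumes the SecretNameReference constructor generated elsewhere in this package, and the secret name is a placeholder: +// +//	webhook := WebhookTokenAuthenticator(). +//		WithKubeConfig(SecretNameReference().WithName("webhook-kubeconfig"))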
+type WebhookTokenAuthenticatorApplyConfiguration struct { + KubeConfig *SecretNameReferenceApplyConfiguration `json:"kubeConfig,omitempty"` +} + +// WebhookTokenAuthenticatorApplyConfiguration constructs a declarative configuration of the WebhookTokenAuthenticator type for use with +// apply. +func WebhookTokenAuthenticator() *WebhookTokenAuthenticatorApplyConfiguration { + return &WebhookTokenAuthenticatorApplyConfiguration{} +} + +// WithKubeConfig sets the KubeConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KubeConfig field is set to the value of the last call. +func (b *WebhookTokenAuthenticatorApplyConfiguration) WithKubeConfig(value *SecretNameReferenceApplyConfiguration) *WebhookTokenAuthenticatorApplyConfiguration { + b.KubeConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go new file mode 100644 index 0000000000000..b4982de150658 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// BackupApplyConfiguration represents a declarative configuration of the Backup type for use +// with apply. +type BackupApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *BackupSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1alpha1.BackupStatus `json:"status,omitempty"` +} + +// Backup constructs a declarative configuration of the Backup type for use with +// apply. +func Backup(name string) *BackupApplyConfiguration { + b := &BackupApplyConfiguration{} + b.WithName(name) + b.WithKind("Backup") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b +} + +// ExtractBackup extracts the applied configuration owned by fieldManager from +// backup. If no managedFields are found in backup for fieldManager, a +// BackupApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// backup must be an unmodified Backup API object that was retrieved from the Kubernetes API. +// ExtractBackup provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental!
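+// +// An illustrative extract/modify-in-place/apply sketch; the typed client value, resource name, and field-manager string are assumptions: +// +//	backup, err := client.ConfigV1alpha1().Backups().Get(ctx, "cluster", metav1.GetOptions{}) +//	if err != nil { +//		return err +//	} +//	cfg, err := ExtractBackup(backup, "backup-operator") +//	if err != nil { +//		return err +//	} +//	cfg.WithSpec(BackupSpec().WithEtcdBackupSpec(EtcdBackupSpec().WithSchedule("0 */6 * * *"))) +//	_, err = client.ConfigV1alpha1().Backups().Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "backup-operator"})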
+func ExtractBackup(backup *configv1alpha1.Backup, fieldManager string) (*BackupApplyConfiguration, error) { + return extractBackup(backup, fieldManager, "") +} + +// ExtractBackupStatus is the same as ExtractBackup except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractBackupStatus(backup *configv1alpha1.Backup, fieldManager string) (*BackupApplyConfiguration, error) { + return extractBackup(backup, fieldManager, "status") +} + +func extractBackup(backup *configv1alpha1.Backup, fieldManager string, subresource string) (*BackupApplyConfiguration, error) { + b := &BackupApplyConfiguration{} + err := managedfields.ExtractInto(backup, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.Backup"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(backup.Name) + + b.WithKind("Backup") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithKind(value string) *BackupApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithAPIVersion(value string) *BackupApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithName(value string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithGenerateName(value string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithNamespace(value string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *BackupApplyConfiguration) WithUID(value types.UID) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithResourceVersion(value string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithGeneration(value int64) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key.
+func (b *BackupApplyConfiguration) WithLabels(entries map[string]string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *BackupApplyConfiguration) WithAnnotations(entries map[string]string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *BackupApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *BackupApplyConfiguration) WithFinalizers(values ...string) *BackupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *BackupApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *BackupApplyConfiguration) WithSpec(value *BackupSpecApplyConfiguration) *BackupApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithStatus(value configv1alpha1.BackupStatus) *BackupApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *BackupApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backupspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backupspec.go new file mode 100644 index 0000000000000..9bca4aca57770 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backupspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// BackupSpecApplyConfiguration represents a declarative configuration of the BackupSpec type for use +// with apply. +type BackupSpecApplyConfiguration struct { + EtcdBackupSpec *EtcdBackupSpecApplyConfiguration `json:"etcd,omitempty"` +} + +// BackupSpecApplyConfiguration constructs a declarative configuration of the BackupSpec type for use with +// apply. +func BackupSpec() *BackupSpecApplyConfiguration { + return &BackupSpecApplyConfiguration{} +} + +// WithEtcdBackupSpec sets the EtcdBackupSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EtcdBackupSpec field is set to the value of the last call. +func (b *BackupSpecApplyConfiguration) WithEtcdBackupSpec(value *EtcdBackupSpecApplyConfiguration) *BackupSpecApplyConfiguration { + b.EtcdBackupSpec = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go new file mode 100644 index 0000000000000..f3d7fdb7758b5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterImagePolicyApplyConfiguration represents a declarative configuration of the ClusterImagePolicy type for use +// with apply. +type ClusterImagePolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterImagePolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterImagePolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ClusterImagePolicy constructs a declarative configuration of the ClusterImagePolicy type for use with +// apply. 
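+// +// An illustrative server-side-apply sketch; the typed client value, policy name, scope, and field-manager string are assumptions: +// +//	policy := ClusterImagePolicy("release-images"). +//		WithSpec(ClusterImagePolicySpec(). +//			WithScopes("quay.io/openshift-release-dev")) +//	_, err := client.ConfigV1alpha1().ClusterImagePolicies().Apply(ctx, policy, metav1.ApplyOptions{FieldManager: "policy-controller"})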
+func ClusterImagePolicy(name string) *ClusterImagePolicyApplyConfiguration { + b := &ClusterImagePolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterImagePolicy") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b +} + +// ExtractClusterImagePolicy extracts the applied configuration owned by fieldManager from +// clusterImagePolicy. If no managedFields are found in clusterImagePolicy for fieldManager, a +// ClusterImagePolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// clusterImagePolicy must be an unmodified ClusterImagePolicy API object that was retrieved from the Kubernetes API. +// ExtractClusterImagePolicy provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterImagePolicy(clusterImagePolicy *configv1alpha1.ClusterImagePolicy, fieldManager string) (*ClusterImagePolicyApplyConfiguration, error) { + return extractClusterImagePolicy(clusterImagePolicy, fieldManager, "") +} + +// ExtractClusterImagePolicyStatus is the same as ExtractClusterImagePolicy except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterImagePolicyStatus(clusterImagePolicy *configv1alpha1.ClusterImagePolicy, fieldManager string) (*ClusterImagePolicyApplyConfiguration, error) { + return extractClusterImagePolicy(clusterImagePolicy, fieldManager, "status") +} + +func extractClusterImagePolicy(clusterImagePolicy *configv1alpha1.ClusterImagePolicy, fieldManager string, subresource string) (*ClusterImagePolicyApplyConfiguration, error) { + b := &ClusterImagePolicyApplyConfiguration{} + err := managedfields.ExtractInto(clusterImagePolicy, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.ClusterImagePolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterImagePolicy.Name) + + b.WithKind("ClusterImagePolicy") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithKind(value string) *ClusterImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithAPIVersion(value string) *ClusterImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithName(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithGenerateName(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithNamespace(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithUID(value types.UID) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithResourceVersion(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithGeneration(value int64) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *ClusterImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *ClusterImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterImagePolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithSpec(value *ClusterImagePolicySpecApplyConfiguration) *ClusterImagePolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithStatus(value *ClusterImagePolicyStatusApplyConfiguration) *ClusterImagePolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go new file mode 100644 index 0000000000000..e4a3470c45736 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use +// with apply. +type ClusterImagePolicySpecApplyConfiguration struct { + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *PolicyApplyConfiguration `json:"policy,omitempty"` +} + +// ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with +// apply.
+func ClusterImagePolicySpec() *ClusterImagePolicySpecApplyConfiguration { + return &ClusterImagePolicySpecApplyConfiguration{} +} + +// WithScopes adds the given value to the Scopes field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Scopes field. +func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1alpha1.ImageScope) *ClusterImagePolicySpecApplyConfiguration { + for i := range values { + b.Scopes = append(b.Scopes, values[i]) + } + return b +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. +func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { + b.Policy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicystatus.go new file mode 100644 index 0000000000000..b5b4a82581723 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicystatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterImagePolicyStatusApplyConfiguration represents a declarative configuration of the ClusterImagePolicyStatus type for use +// with apply. +type ClusterImagePolicyStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ClusterImagePolicyStatusApplyConfiguration constructs a declarative configuration of the ClusterImagePolicyStatus type for use with +// apply. +func ClusterImagePolicyStatus() *ClusterImagePolicyStatusApplyConfiguration { + return &ClusterImagePolicyStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ClusterImagePolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ClusterImagePolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/etcdbackupspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/etcdbackupspec.go new file mode 100644 index 0000000000000..ab631f302cbae --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/etcdbackupspec.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// EtcdBackupSpecApplyConfiguration represents a declarative configuration of the EtcdBackupSpec type for use +// with apply.
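+// +// An illustrative sketch; the cron schedule, time zone, and PVC name are placeholder assumptions: +// +//	etcdSpec := EtcdBackupSpec(). +//		WithSchedule("0 */6 * * *"). +//		WithTimeZone("UTC"). +//		WithPVCName("etcd-backup-pvc")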
+type EtcdBackupSpecApplyConfiguration struct { + Schedule *string `json:"schedule,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + RetentionPolicy *RetentionPolicyApplyConfiguration `json:"retentionPolicy,omitempty"` + PVCName *string `json:"pvcName,omitempty"` +} + +// EtcdBackupSpecApplyConfiguration constructs a declarative configuration of the EtcdBackupSpec type for use with +// apply. +func EtcdBackupSpec() *EtcdBackupSpecApplyConfiguration { + return &EtcdBackupSpecApplyConfiguration{} +} + +// WithSchedule sets the Schedule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Schedule field is set to the value of the last call. +func (b *EtcdBackupSpecApplyConfiguration) WithSchedule(value string) *EtcdBackupSpecApplyConfiguration { + b.Schedule = &value + return b +} + +// WithTimeZone sets the TimeZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TimeZone field is set to the value of the last call. +func (b *EtcdBackupSpecApplyConfiguration) WithTimeZone(value string) *EtcdBackupSpecApplyConfiguration { + b.TimeZone = &value + return b +} + +// WithRetentionPolicy sets the RetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetentionPolicy field is set to the value of the last call. +func (b *EtcdBackupSpecApplyConfiguration) WithRetentionPolicy(value *RetentionPolicyApplyConfiguration) *EtcdBackupSpecApplyConfiguration { + b.RetentionPolicy = value + return b +} + +// WithPVCName sets the PVCName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PVCName field is set to the value of the last call. +func (b *EtcdBackupSpecApplyConfiguration) WithPVCName(value string) *EtcdBackupSpecApplyConfiguration { + b.PVCName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go new file mode 100644 index 0000000000000..2a907a7e9729b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// FulcioCAWithRekorApplyConfiguration represents a declarative configuration of the FulcioCAWithRekor type for use +// with apply. +type FulcioCAWithRekorApplyConfiguration struct { + FulcioCAData []byte `json:"fulcioCAData,omitempty"` + RekorKeyData []byte `json:"rekorKeyData,omitempty"` + FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"` +} + +// FulcioCAWithRekorApplyConfiguration constructs a declarative configuration of the FulcioCAWithRekor type for use with +// apply. 
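+// +// An illustrative sketch; fulcioPEM and rekorPEM stand for CA-certificate and public-key bytes loaded elsewhere and are assumptions: +// +//	fulcio := FulcioCAWithRekor(). +//		WithFulcioCAData(fulcioPEM...). +//		WithRekorKeyData(rekorPEM...)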
+func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration { + return &FulcioCAWithRekorApplyConfiguration{} +} + +// WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the FulcioCAData field. +func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration { + for i := range values { + b.FulcioCAData = append(b.FulcioCAData, values[i]) + } + return b +} + +// WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RekorKeyData field. +func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration { + for i := range values { + b.RekorKeyData = append(b.RekorKeyData, values[i]) + } + return b +} + +// WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FulcioSubject field is set to the value of the last call. +func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration { + b.FulcioSubject = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go new file mode 100644 index 0000000000000..e870fe6c26332 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// GatherConfigApplyConfiguration represents a declarative configuration of the GatherConfig type for use +// with apply. +type GatherConfigApplyConfiguration struct { + DataPolicy *configv1alpha1.DataPolicy `json:"dataPolicy,omitempty"` + DisabledGatherers []string `json:"disabledGatherers,omitempty"` +} + +// GatherConfigApplyConfiguration constructs a declarative configuration of the GatherConfig type for use with +// apply. +func GatherConfig() *GatherConfigApplyConfiguration { + return &GatherConfigApplyConfiguration{} +} + +// WithDataPolicy sets the DataPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DataPolicy field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithDataPolicy(value configv1alpha1.DataPolicy) *GatherConfigApplyConfiguration { + b.DataPolicy = &value + return b +} + +// WithDisabledGatherers adds the given value to the DisabledGatherers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DisabledGatherers field.
+func (b *GatherConfigApplyConfiguration) WithDisabledGatherers(values ...string) *GatherConfigApplyConfiguration { + for i := range values { + b.DisabledGatherers = append(b.DisabledGatherers, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go new file mode 100644 index 0000000000000..6595aa782a421 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImagePolicyApplyConfiguration represents a declarative configuration of the ImagePolicy type for use +// with apply. +type ImagePolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImagePolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ImagePolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ImagePolicy constructs a declarative configuration of the ImagePolicy type for use with +// apply. +func ImagePolicy(name, namespace string) *ImagePolicyApplyConfiguration { + b := &ImagePolicyApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ImagePolicy") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b +} + +// ExtractImagePolicy extracts the applied configuration owned by fieldManager from +// imagePolicy. If no managedFields are found in imagePolicy for fieldManager, an +// ImagePolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// imagePolicy must be an unmodified ImagePolicy API object that was retrieved from the Kubernetes API. +// ExtractImagePolicy provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImagePolicy(imagePolicy *configv1alpha1.ImagePolicy, fieldManager string) (*ImagePolicyApplyConfiguration, error) { + return extractImagePolicy(imagePolicy, fieldManager, "") +} + +// ExtractImagePolicyStatus is the same as ExtractImagePolicy except +// that it extracts the status subresource applied configuration. +// Experimental!
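+// +// An illustrative call; imagePolicy is assumed to be an unmodified object fetched from the API server, and the field-manager string is a placeholder: +// +//	statusCfg, err := ExtractImagePolicyStatus(imagePolicy, "policy-controller")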
+func ExtractImagePolicyStatus(imagePolicy *configv1alpha1.ImagePolicy, fieldManager string) (*ImagePolicyApplyConfiguration, error) { + return extractImagePolicy(imagePolicy, fieldManager, "status") +} + +func extractImagePolicy(imagePolicy *configv1alpha1.ImagePolicy, fieldManager string, subresource string) (*ImagePolicyApplyConfiguration, error) { + b := &ImagePolicyApplyConfiguration{} + err := managedfields.ExtractInto(imagePolicy, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.ImagePolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(imagePolicy.Name) + b.WithNamespace(imagePolicy.Namespace) + + b.WithKind("ImagePolicy") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithKind(value string) *ImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithAPIVersion(value string) *ImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithName(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithGenerateName(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithNamespace(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ImagePolicyApplyConfiguration) WithUID(value types.UID) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithResourceVersion(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithGeneration(value int64) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing entries in the Labels field with the same key.
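+//
+// Editor's aside (not generated code): the map-valued setters below merge
+// rather than replace. A short example of the resulting semantics:
+//
+//	b.WithLabels(map[string]string{"tier": "prod"}).
+//		WithLabels(map[string]string{"tier": "dev", "team": "imaging"})
+//	// b's Labels are now {"tier": "dev", "team": "imaging"}: entries from
+//	// each call are merged, and later calls win for duplicate keys.
+//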
+func (b *ImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing entries in the Annotations field with the same key. +func (b *ImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ImagePolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithSpec(value *ImagePolicySpecApplyConfiguration) *ImagePolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithStatus(value *ImagePolicyStatusApplyConfiguration) *ImagePolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go new file mode 100644 index 0000000000000..ac08e9cf4e53a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use +// with apply. +type ImagePolicySpecApplyConfiguration struct { + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *PolicyApplyConfiguration `json:"policy,omitempty"` +} + +// ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with +// apply. +func ImagePolicySpec() *ImagePolicySpecApplyConfiguration { + return &ImagePolicySpecApplyConfiguration{} +} + +// WithScopes adds the given value to the Scopes field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Scopes field. +func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1alpha1.ImageScope) *ImagePolicySpecApplyConfiguration { + for i := range values { + b.Scopes = append(b.Scopes, values[i]) + } + return b +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. +func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { + b.Policy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicystatus.go new file mode 100644 index 0000000000000..d5c664772d953 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicystatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImagePolicyStatusApplyConfiguration represents a declarative configuration of the ImagePolicyStatus type for use +// with apply. +type ImagePolicyStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ImagePolicyStatusApplyConfiguration constructs a declarative configuration of the ImagePolicyStatus type for use with +// apply.
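+//
+// Editor's aside (not generated code): a hedged sketch of building a status
+// with the metav1 apply-configuration Condition builder (imported as v1 in
+// this package); the condition type, reason, and constant qualifier below are
+// illustrative:
+//
+//	status := ImagePolicyStatus().WithConditions(
+//		v1.Condition().
+//			WithType("Verified").
+//			WithStatus(metav1.ConditionTrue).
+//			WithReason("SignatureValid"))
+//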
+func ImagePolicyStatus() *ImagePolicyStatusApplyConfiguration { + return &ImagePolicyStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ImagePolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ImagePolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go new file mode 100644 index 0000000000000..cf4ae1f006d73 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InsightsDataGatherApplyConfiguration represents a declarative configuration of the InsightsDataGather type for use +// with apply. +type InsightsDataGatherApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InsightsDataGatherSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1alpha1.InsightsDataGatherStatus `json:"status,omitempty"` +} + +// InsightsDataGather constructs a declarative configuration of the InsightsDataGather type for use with +// apply. +func InsightsDataGather(name string) *InsightsDataGatherApplyConfiguration { + b := &InsightsDataGatherApplyConfiguration{} + b.WithName(name) + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b +} + +// ExtractInsightsDataGather extracts the applied configuration owned by fieldManager from +// insightsDataGather. If no managedFields are found in insightsDataGather for fieldManager, an +// InsightsDataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// insightsDataGather must be an unmodified InsightsDataGather API object that was retrieved from the Kubernetes API. +// ExtractInsightsDataGather provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental!
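+//
+// Editor's aside (not generated code): unlike ImagePolicy above,
+// InsightsDataGather is cluster-scoped, so its constructor takes only a name.
+// A hedged sketch, assuming the conventional singleton name "cluster" and an
+// assumed clientset shape:
+//
+//	idg := InsightsDataGather("cluster").WithSpec(InsightsDataGatherSpec())
+//	_, err := client.ConfigV1alpha1().InsightsDataGathers().
+//		Apply(ctx, idg, metav1.ApplyOptions{FieldManager: "my-manager"})
+//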
+func ExtractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "") +} + +// ExtractInsightsDataGatherStatus is the same as ExtractInsightsDataGather except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractInsightsDataGatherStatus(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "status") +} + +func extractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string, subresource string) (*InsightsDataGatherApplyConfiguration, error) { + b := &InsightsDataGatherApplyConfiguration{} + err := managedfields.ExtractInto(insightsDataGather, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.InsightsDataGather"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(insightsDataGather.Name) + + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing entries in the Labels field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing entries in the Annotations field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InsightsDataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithSpec(value *InsightsDataGatherSpecApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha1.InsightsDataGatherStatus) *InsightsDataGatherApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go new file mode 100644 index 0000000000000..51b0ba6295ae8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// InsightsDataGatherSpecApplyConfiguration represents a declarative configuration of the InsightsDataGatherSpec type for use +// with apply. +type InsightsDataGatherSpecApplyConfiguration struct { + GatherConfig *GatherConfigApplyConfiguration `json:"gatherConfig,omitempty"` +} + +// InsightsDataGatherSpecApplyConfiguration constructs a declarative configuration of the InsightsDataGatherSpec type for use with +// apply. +func InsightsDataGatherSpec() *InsightsDataGatherSpecApplyConfiguration { + return &InsightsDataGatherSpecApplyConfiguration{} +} + +// WithGatherConfig sets the GatherConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatherConfig field is set to the value of the last call. 
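+//
+// Editor's aside (not generated code): a hedged sketch of assembling the spec
+// below. The gatherer names are hypothetical, and "ObfuscateNetworking" is an
+// assumed DataPolicy value:
+//
+//	spec := InsightsDataGatherSpec().WithGatherConfig(
+//		GatherConfig().
+//			WithDataPolicy(configv1alpha1.DataPolicy("ObfuscateNetworking")).
+//			WithDisabledGatherers("gatherer/alpha").
+//			WithDisabledGatherers("gatherer/beta"))
+//	// DisabledGatherers now holds both entries; repeated calls append.
+//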
+func (b *InsightsDataGatherSpecApplyConfiguration) WithGatherConfig(value *GatherConfigApplyConfiguration) *InsightsDataGatherSpecApplyConfiguration { + b.GatherConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go new file mode 100644 index 0000000000000..61e4856642940 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// PolicyApplyConfiguration represents a declarative configuration of the Policy type for use +// with apply. +type PolicyApplyConfiguration struct { + RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"` + SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"` +} + +// PolicyApplyConfiguration constructs a declarative configuration of the Policy type for use with +// apply. +func Policy() *PolicyApplyConfiguration { + return &PolicyApplyConfiguration{} +} + +// WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RootOfTrust field is set to the value of the last call. +func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration { + b.RootOfTrust = value + return b +} + +// WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignedIdentity field is set to the value of the last call. +func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration { + b.SignedIdentity = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyfulciosubject.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyfulciosubject.go new file mode 100644 index 0000000000000..c4608f47a2dd5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyfulciosubject.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// PolicyFulcioSubjectApplyConfiguration represents a declarative configuration of the PolicyFulcioSubject type for use +// with apply. +type PolicyFulcioSubjectApplyConfiguration struct { + OIDCIssuer *string `json:"oidcIssuer,omitempty"` + SignedEmail *string `json:"signedEmail,omitempty"` +} + +// PolicyFulcioSubjectApplyConfiguration constructs a declarative configuration of the PolicyFulcioSubject type for use with +// apply. +func PolicyFulcioSubject() *PolicyFulcioSubjectApplyConfiguration { + return &PolicyFulcioSubjectApplyConfiguration{} +} + +// WithOIDCIssuer sets the OIDCIssuer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OIDCIssuer field is set to the value of the last call. 
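+//
+// Editor's aside (not generated code): a hedged sketch tying the Fulcio
+// subject to the FulcioCAWithRekor builder defined earlier in this package;
+// the issuer URL and email values are illustrative:
+//
+//	subject := PolicyFulcioSubject().
+//		WithOIDCIssuer("https://oidc.example.com").
+//		WithSignedEmail("release@example.com")
+//	fulcio := FulcioCAWithRekor().WithFulcioSubject(subject)
+//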
+func (b *PolicyFulcioSubjectApplyConfiguration) WithOIDCIssuer(value string) *PolicyFulcioSubjectApplyConfiguration { + b.OIDCIssuer = &value + return b +} + +// WithSignedEmail sets the SignedEmail field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignedEmail field is set to the value of the last call. +func (b *PolicyFulcioSubjectApplyConfiguration) WithSignedEmail(value string) *PolicyFulcioSubjectApplyConfiguration { + b.SignedEmail = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go new file mode 100644 index 0000000000000..c03a2d6634023 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// PolicyIdentityApplyConfiguration represents a declarative configuration of the PolicyIdentity type for use +// with apply. +type PolicyIdentityApplyConfiguration struct { + MatchPolicy *configv1alpha1.IdentityMatchPolicy `json:"matchPolicy,omitempty"` + PolicyMatchExactRepository *PolicyMatchExactRepositoryApplyConfiguration `json:"exactRepository,omitempty"` + PolicyMatchRemapIdentity *PolicyMatchRemapIdentityApplyConfiguration `json:"remapIdentity,omitempty"` +} + +// PolicyIdentityApplyConfiguration constructs a declarative configuration of the PolicyIdentity type for use with +// apply. +func PolicyIdentity() *PolicyIdentityApplyConfiguration { + return &PolicyIdentityApplyConfiguration{} +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. +func (b *PolicyIdentityApplyConfiguration) WithMatchPolicy(value configv1alpha1.IdentityMatchPolicy) *PolicyIdentityApplyConfiguration { + b.MatchPolicy = &value + return b +} + +// WithPolicyMatchExactRepository sets the PolicyMatchExactRepository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyMatchExactRepository field is set to the value of the last call. +func (b *PolicyIdentityApplyConfiguration) WithPolicyMatchExactRepository(value *PolicyMatchExactRepositoryApplyConfiguration) *PolicyIdentityApplyConfiguration { + b.PolicyMatchExactRepository = value + return b +} + +// WithPolicyMatchRemapIdentity sets the PolicyMatchRemapIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyMatchRemapIdentity field is set to the value of the last call. 
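+//
+// Editor's aside (not generated code): a hedged sketch of a remapped signed
+// identity. The "RemapIdentity" match-policy value and the repository
+// prefixes are assumptions for illustration:
+//
+//	id := PolicyIdentity().
+//		WithMatchPolicy(configv1alpha1.IdentityMatchPolicy("RemapIdentity")).
+//		WithPolicyMatchRemapIdentity(PolicyMatchRemapIdentity().
+//			WithPrefix("mirror.example.com/app").
+//			WithSignedPrefix("registry.example.com/app"))
+//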
+func (b *PolicyIdentityApplyConfiguration) WithPolicyMatchRemapIdentity(value *PolicyMatchRemapIdentityApplyConfiguration) *PolicyIdentityApplyConfiguration { + b.PolicyMatchRemapIdentity = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go new file mode 100644 index 0000000000000..58870d5eb867d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// PolicyMatchExactRepositoryApplyConfiguration represents a declarative configuration of the PolicyMatchExactRepository type for use +// with apply. +type PolicyMatchExactRepositoryApplyConfiguration struct { + Repository *configv1alpha1.IdentityRepositoryPrefix `json:"repository,omitempty"` +} + +// PolicyMatchExactRepositoryApplyConfiguration constructs a declarative configuration of the PolicyMatchExactRepository type for use with +// apply. +func PolicyMatchExactRepository() *PolicyMatchExactRepositoryApplyConfiguration { + return &PolicyMatchExactRepositoryApplyConfiguration{} +} + +// WithRepository sets the Repository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Repository field is set to the value of the last call. +func (b *PolicyMatchExactRepositoryApplyConfiguration) WithRepository(value configv1alpha1.IdentityRepositoryPrefix) *PolicyMatchExactRepositoryApplyConfiguration { + b.Repository = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go new file mode 100644 index 0000000000000..09075d0bea7ef --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// PolicyMatchRemapIdentityApplyConfiguration represents a declarative configuration of the PolicyMatchRemapIdentity type for use +// with apply. +type PolicyMatchRemapIdentityApplyConfiguration struct { + Prefix *configv1alpha1.IdentityRepositoryPrefix `json:"prefix,omitempty"` + SignedPrefix *configv1alpha1.IdentityRepositoryPrefix `json:"signedPrefix,omitempty"` +} + +// PolicyMatchRemapIdentityApplyConfiguration constructs a declarative configuration of the PolicyMatchRemapIdentity type for use with +// apply. +func PolicyMatchRemapIdentity() *PolicyMatchRemapIdentityApplyConfiguration { + return &PolicyMatchRemapIdentityApplyConfiguration{} +} + +// WithPrefix sets the Prefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Prefix field is set to the value of the last call. 
+func (b *PolicyMatchRemapIdentityApplyConfiguration) WithPrefix(value configv1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration { + b.Prefix = &value + return b +} + +// WithSignedPrefix sets the SignedPrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignedPrefix field is set to the value of the last call. +func (b *PolicyMatchRemapIdentityApplyConfiguration) WithSignedPrefix(value configv1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration { + b.SignedPrefix = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go new file mode 100644 index 0000000000000..c525e16670dff --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use +// with apply. +type PolicyRootOfTrustApplyConfiguration struct { + PolicyType *configv1alpha1.PolicyType `json:"policyType,omitempty"` + PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"` + FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` +} + +// PolicyRootOfTrustApplyConfiguration constructs a declarative configuration of the PolicyRootOfTrust type for use with +// apply. +func PolicyRootOfTrust() *PolicyRootOfTrustApplyConfiguration { + return &PolicyRootOfTrustApplyConfiguration{} +} + +// WithPolicyType sets the PolicyType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyType field is set to the value of the last call. +func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1alpha1.PolicyType) *PolicyRootOfTrustApplyConfiguration { + b.PolicyType = &value + return b +} + +// WithPublicKey sets the PublicKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PublicKey field is set to the value of the last call. +func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { + b.PublicKey = value + return b +} + +// WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FulcioCAWithRekor field is set to the value of the last call. 
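+//
+// Editor's aside (not generated code): PolicyRootOfTrust acts as a
+// discriminated union, with PolicyType selecting which sibling field is
+// honored. A hedged sketch, assuming "FulcioCAWithRekor" is a valid
+// PolicyType value and reusing the illustrative caPEM/rekorPub byte slices:
+//
+//	rot := PolicyRootOfTrust().
+//		WithPolicyType(configv1alpha1.PolicyType("FulcioCAWithRekor")).
+//		WithFulcioCAWithRekor(FulcioCAWithRekor().
+//			WithFulcioCAData(caPEM...).
+//			WithRekorKeyData(rekorPub...))
+//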
+func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { + b.FulcioCAWithRekor = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go new file mode 100644 index 0000000000000..91665a90b7407 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// PublicKeyApplyConfiguration represents a declarative configuration of the PublicKey type for use +// with apply. +type PublicKeyApplyConfiguration struct { + KeyData []byte `json:"keyData,omitempty"` + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// PublicKeyApplyConfiguration constructs a declarative configuration of the PublicKey type for use with +// apply. +func PublicKey() *PublicKeyApplyConfiguration { + return &PublicKeyApplyConfiguration{} +} + +// WithKeyData adds the given value to the KeyData field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the KeyData field. +func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration { + for i := range values { + b.KeyData = append(b.KeyData, values[i]) + } + return b +} + +// WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RekorKeyData field. +func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration { + for i := range values { + b.RekorKeyData = append(b.RekorKeyData, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionnumberconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionnumberconfig.go new file mode 100644 index 0000000000000..f6a787171789f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionnumberconfig.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// RetentionNumberConfigApplyConfiguration represents a declarative configuration of the RetentionNumberConfig type for use +// with apply. +type RetentionNumberConfigApplyConfiguration struct { + MaxNumberOfBackups *int `json:"maxNumberOfBackups,omitempty"` +} + +// RetentionNumberConfigApplyConfiguration constructs a declarative configuration of the RetentionNumberConfig type for use with +// apply. +func RetentionNumberConfig() *RetentionNumberConfigApplyConfiguration { + return &RetentionNumberConfigApplyConfiguration{} +} + +// WithMaxNumberOfBackups sets the MaxNumberOfBackups field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxNumberOfBackups field is set to the value of the last call.
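+//
+// Editor's aside (not generated code): a hedged sketch of a count-based
+// retention policy using the RetentionPolicy builder from this package; the
+// "RetentionNumber" RetentionType value is an assumption for illustration:
+//
+//	rp := RetentionPolicy().
+//		WithRetentionType(configv1alpha1.RetentionType("RetentionNumber")).
+//		WithRetentionNumber(RetentionNumberConfig().WithMaxNumberOfBackups(5))
+//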
+func (b *RetentionNumberConfigApplyConfiguration) WithMaxNumberOfBackups(value int) *RetentionNumberConfigApplyConfiguration { + b.MaxNumberOfBackups = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go new file mode 100644 index 0000000000000..981fb25737e07 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// RetentionPolicyApplyConfiguration represents a declarative configuration of the RetentionPolicy type for use +// with apply. +type RetentionPolicyApplyConfiguration struct { + RetentionType *configv1alpha1.RetentionType `json:"retentionType,omitempty"` + RetentionNumber *RetentionNumberConfigApplyConfiguration `json:"retentionNumber,omitempty"` + RetentionSize *RetentionSizeConfigApplyConfiguration `json:"retentionSize,omitempty"` +} + +// RetentionPolicyApplyConfiguration constructs a declarative configuration of the RetentionPolicy type for use with +// apply. +func RetentionPolicy() *RetentionPolicyApplyConfiguration { + return &RetentionPolicyApplyConfiguration{} +} + +// WithRetentionType sets the RetentionType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetentionType field is set to the value of the last call. +func (b *RetentionPolicyApplyConfiguration) WithRetentionType(value configv1alpha1.RetentionType) *RetentionPolicyApplyConfiguration { + b.RetentionType = &value + return b +} + +// WithRetentionNumber sets the RetentionNumber field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetentionNumber field is set to the value of the last call. +func (b *RetentionPolicyApplyConfiguration) WithRetentionNumber(value *RetentionNumberConfigApplyConfiguration) *RetentionPolicyApplyConfiguration { + b.RetentionNumber = value + return b +} + +// WithRetentionSize sets the RetentionSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RetentionSize field is set to the value of the last call. +func (b *RetentionPolicyApplyConfiguration) WithRetentionSize(value *RetentionSizeConfigApplyConfiguration) *RetentionPolicyApplyConfiguration { + b.RetentionSize = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionsizeconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionsizeconfig.go new file mode 100644 index 0000000000000..96b723be4c6be --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionsizeconfig.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// RetentionSizeConfigApplyConfiguration represents a declarative configuration of the RetentionSizeConfig type for use +// with apply. 
+type RetentionSizeConfigApplyConfiguration struct { + MaxSizeOfBackupsGb *int `json:"maxSizeOfBackupsGb,omitempty"` +} + +// RetentionSizeConfigApplyConfiguration constructs a declarative configuration of the RetentionSizeConfig type for use with +// apply. +func RetentionSizeConfig() *RetentionSizeConfigApplyConfiguration { + return &RetentionSizeConfigApplyConfiguration{} +} + +// WithMaxSizeOfBackupsGb sets the MaxSizeOfBackupsGb field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxSizeOfBackupsGb field is set to the value of the last call. +func (b *RetentionSizeConfigApplyConfiguration) WithMaxSizeOfBackupsGb(value int) *RetentionSizeConfigApplyConfiguration { + b.MaxSizeOfBackupsGb = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..2cfbb4beb5646 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -0,0 +1,4315 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.config.v1.APIServer + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.APIServerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.APIServerStatus + default: {} +- name: com.github.openshift.api.config.v1.APIServerEncryption + map: + fields: + - name: type + type: + scalar: string +- name: com.github.openshift.api.config.v1.APIServerNamedServingCert + map: + fields: + - name: names + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: servingCertificate + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: com.github.openshift.api.config.v1.APIServerServingCerts + map: + fields: + - name: namedCertificates + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.APIServerNamedServingCert + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.APIServerSpec + map: + fields: + - name: additionalCORSAllowedOrigins + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: audit + type: + namedType: com.github.openshift.api.config.v1.Audit + default: {} + - name: clientCA + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: encryption + type: + namedType: com.github.openshift.api.config.v1.APIServerEncryption + default: {} + - name: servingCerts + type: + namedType: com.github.openshift.api.config.v1.APIServerServingCerts + default: {} + - name: tlsSecurityProfile + type: + namedType: 
com.github.openshift.api.config.v1.TLSSecurityProfile +- name: com.github.openshift.api.config.v1.APIServerStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.AWSDNSSpec + map: + fields: + - name: privateZoneIAMRole + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.AWSIngressSpec + map: + fields: + - name: type + type: + scalar: string + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.AWSPlatformSpec + map: + fields: + - name: serviceEndpoints + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AWSServiceEndpoint + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.AWSPlatformStatus + map: + fields: + - name: cloudLoadBalancerConfig + type: + namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig + default: + dnsType: PlatformDefault + - name: region + type: + scalar: string + default: "" + - name: resourceTags + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AWSResourceTag + elementRelationship: atomic + - name: serviceEndpoints + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AWSServiceEndpoint + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.AWSResourceTag + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.AWSServiceEndpoint + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.AlibabaCloudPlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.AlibabaCloudPlatformStatus + map: + fields: + - name: region + type: + scalar: string + default: "" + - name: resourceGroupID + type: + scalar: string + - name: resourceTags + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AlibabaCloudResourceTag + elementRelationship: associative + keys: + - key +- name: com.github.openshift.api.config.v1.AlibabaCloudResourceTag + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Audit + map: + fields: + - name: customRules + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AuditCustomRule + elementRelationship: associative + keys: + - group + - name: profile + type: + scalar: string +- name: com.github.openshift.api.config.v1.AuditCustomRule + map: + fields: + - name: group + type: + scalar: string + default: "" + - name: profile + type: + scalar: string +- name: com.github.openshift.api.config.v1.Authentication + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.AuthenticationSpec + default: {} + - name: status + type: + namedType: 
com.github.openshift.api.config.v1.AuthenticationStatus + default: {} +- name: com.github.openshift.api.config.v1.AuthenticationSpec + map: + fields: + - name: oauthMetadata + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: oidcProviders + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.OIDCProvider + elementRelationship: associative + keys: + - name + - name: serviceAccountIssuer + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" + - name: webhookTokenAuthenticator + type: + namedType: com.github.openshift.api.config.v1.WebhookTokenAuthenticator + - name: webhookTokenAuthenticators + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.DeprecatedWebhookTokenAuthenticator + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.AuthenticationStatus + map: + fields: + - name: integratedOAuthMetadata + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: oidcClients + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.OIDCClientStatus + elementRelationship: associative + keys: + - componentNamespace + - componentName +- name: com.github.openshift.api.config.v1.AzurePlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.AzurePlatformStatus + map: + fields: + - name: armEndpoint + type: + scalar: string + - name: cloudName + type: + scalar: string + - name: networkResourceGroupName + type: + scalar: string + - name: resourceGroupName + type: + scalar: string + default: "" + - name: resourceTags + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AzureResourceTag + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.AzureResourceTag + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.BareMetalPlatformSpec + map: + fields: + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: machineNetworks + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.BareMetalPlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: ingressIP + type: + scalar: string + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer + default: + type: OpenShiftManagedDefault + - name: machineNetworks + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: nodeDNSIP + type: + scalar: string +- name: com.github.openshift.api.config.v1.BasicAuthIdentityProvider + map: + fields: 
+ - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: tlsClientCert + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: tlsClientKey + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Build + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.BuildSpec + default: {} +- name: com.github.openshift.api.config.v1.BuildDefaults + map: + fields: + - name: defaultProxy + type: + namedType: com.github.openshift.api.config.v1.ProxySpec + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: gitProxy + type: + namedType: com.github.openshift.api.config.v1.ProxySpec + - name: imageLabels + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ImageLabel + elementRelationship: atomic + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} +- name: com.github.openshift.api.config.v1.BuildOverrides + map: + fields: + - name: forcePull + type: + scalar: boolean + - name: imageLabels + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ImageLabel + elementRelationship: atomic + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.BuildSpec + map: + fields: + - name: additionalTrustedCA + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: buildDefaults + type: + namedType: com.github.openshift.api.config.v1.BuildDefaults + default: {} + - name: buildOverrides + type: + namedType: com.github.openshift.api.config.v1.BuildOverrides + default: {} +- name: com.github.openshift.api.config.v1.CloudControllerManagerStatus + map: + fields: + - name: state + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.CloudLoadBalancerConfig + map: + fields: + - name: clusterHosted + type: + namedType: com.github.openshift.api.config.v1.CloudLoadBalancerIPs + - name: dnsType + type: + scalar: string + default: PlatformDefault + unions: + - discriminator: dnsType + fields: + - fieldName: clusterHosted + discriminatorValue: ClusterHosted +- name: com.github.openshift.api.config.v1.CloudLoadBalancerIPs + map: + fields: + - name: apiIntLoadBalancerIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: apiLoadBalancerIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: ingressLoadBalancerIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1.ClusterCondition + map: + fields: + - name: promql + type: + namedType: com.github.openshift.api.config.v1.PromQLClusterCondition + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ClusterNetworkEntry + map: + fields: + - name: cidr + type: + scalar: string + default: "" + - name: hostPrefix + type: + scalar: 
numeric +- name: com.github.openshift.api.config.v1.ClusterOperator + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ClusterOperatorSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ClusterOperatorStatus + default: {} +- name: com.github.openshift.api.config.v1.ClusterOperatorSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.ClusterOperatorStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ClusterOperatorStatusCondition + elementRelationship: associative + keys: + - type + - name: extension + type: + namedType: __untyped_atomic_ + - name: relatedObjects + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ObjectReference + elementRelationship: atomic + - name: versions + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.OperandVersion + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ClusterOperatorStatusCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ClusterVersion + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ClusterVersionSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ClusterVersionStatus + default: {} +- name: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesSpec + map: + fields: + - name: additionalEnabledCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: baselineCapabilitySet + type: + scalar: string +- name: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesStatus + map: + fields: + - name: enabledCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: knownCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ClusterVersionSpec + map: + fields: + - name: capabilities + type: + namedType: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesSpec + - name: channel + type: + scalar: string + - name: clusterID + type: + scalar: string + default: "" + - name: desiredUpdate + type: + namedType: com.github.openshift.api.config.v1.Update + - name: overrides + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ComponentOverride + elementRelationship: associative + keys: + - kind + - group + - namespace + - name + - name: signatureStores + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.SignatureStore + elementRelationship: associative + keys: 
+ - url + - name: upstream + type: + scalar: string +- name: com.github.openshift.api.config.v1.ClusterVersionStatus + map: + fields: + - name: availableUpdates + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.Release + elementRelationship: atomic + - name: capabilities + type: + namedType: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesStatus + default: {} + - name: conditionalUpdates + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ConditionalUpdate + elementRelationship: atomic + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ClusterOperatorStatusCondition + elementRelationship: associative + keys: + - type + - name: desired + type: + namedType: com.github.openshift.api.config.v1.Release + default: {} + - name: history + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.UpdateHistory + elementRelationship: atomic + - name: observedGeneration + type: + scalar: numeric + default: 0 + - name: versionHash + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ComponentOverride + map: + fields: + - name: group + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" + - name: unmanaged + type: + scalar: boolean + default: false +- name: com.github.openshift.api.config.v1.ComponentRouteSpec + map: + fields: + - name: hostname + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" + - name: servingCertKeyPairSecret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: com.github.openshift.api.config.v1.ComponentRouteStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: consumingUsers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: currentHostnames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultHostname + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" + - name: relatedObjects + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ObjectReference + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ConditionalUpdate + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: release + type: + namedType: com.github.openshift.api.config.v1.Release + default: {} + - name: risks + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ConditionalUpdateRisk + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.config.v1.ConditionalUpdateRisk + map: + fields: + - name: matchingRules + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ClusterCondition + elementRelationship: atomic + - name: message + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: url + type: + scalar: string + default: "" +- name: 
com.github.openshift.api.config.v1.ConfigMapFileReference + map: + fields: + - name: key + type: + scalar: string + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ConfigMapNameReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Console + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ConsoleSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ConsoleStatus + default: {} +- name: com.github.openshift.api.config.v1.ConsoleAuthentication + map: + fields: + - name: logoutRedirect + type: + scalar: string +- name: com.github.openshift.api.config.v1.ConsoleSpec + map: + fields: + - name: authentication + type: + namedType: com.github.openshift.api.config.v1.ConsoleAuthentication + default: {} +- name: com.github.openshift.api.config.v1.ConsoleStatus + map: + fields: + - name: consoleURL + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.CustomFeatureGates + map: + fields: + - name: disabled + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: enabled + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.CustomTLSProfile + map: + fields: + - name: ciphers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: minTLSVersion + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.DNS + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.DNSSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.DNSStatus + default: {} +- name: com.github.openshift.api.config.v1.DNSPlatformSpec + map: + fields: + - name: aws + type: + namedType: com.github.openshift.api.config.v1.AWSDNSSpec + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: aws + discriminatorValue: AWS +- name: com.github.openshift.api.config.v1.DNSSpec + map: + fields: + - name: baseDomain + type: + scalar: string + default: "" + - name: platform + type: + namedType: com.github.openshift.api.config.v1.DNSPlatformSpec + default: {} + - name: privateZone + type: + namedType: com.github.openshift.api.config.v1.DNSZone + - name: publicZone + type: + namedType: com.github.openshift.api.config.v1.DNSZone +- name: com.github.openshift.api.config.v1.DNSStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.DNSZone + map: + fields: + - name: id + type: + scalar: string + - name: tags + type: + map: + elementType: + scalar: string +- name: com.github.openshift.api.config.v1.DeprecatedWebhookTokenAuthenticator + map: + fields: + - name: kubeConfig + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: 
com.github.openshift.api.config.v1.EquinixMetalPlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.EquinixMetalPlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: ingressIP + type: + scalar: string +- name: com.github.openshift.api.config.v1.ExternalIPConfig + map: + fields: + - name: autoAssignCIDRs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: policy + type: + namedType: com.github.openshift.api.config.v1.ExternalIPPolicy +- name: com.github.openshift.api.config.v1.ExternalIPPolicy + map: + fields: + - name: allowedCIDRs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: rejectedCIDRs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ExternalPlatformSpec + map: + fields: + - name: platformName + type: + scalar: string + default: Unknown +- name: com.github.openshift.api.config.v1.ExternalPlatformStatus + map: + fields: + - name: cloudControllerManager + type: + namedType: com.github.openshift.api.config.v1.CloudControllerManagerStatus + default: {} +- name: com.github.openshift.api.config.v1.FeatureGate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.FeatureGateSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.FeatureGateStatus + default: {} +- name: com.github.openshift.api.config.v1.FeatureGateAttributes + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.FeatureGateDetails + map: + fields: + - name: disabled + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.FeatureGateAttributes + elementRelationship: atomic + - name: enabled + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.FeatureGateAttributes + elementRelationship: atomic + - name: version + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.FeatureGateSpec + map: + fields: + - name: customNoUpgrade + type: + namedType: com.github.openshift.api.config.v1.CustomFeatureGates + - name: featureSet + type: + scalar: string + unions: + - discriminator: featureSet + fields: + - fieldName: customNoUpgrade + discriminatorValue: CustomNoUpgrade +- name: com.github.openshift.api.config.v1.FeatureGateStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: featureGates + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.FeatureGateDetails + elementRelationship: associative + keys: + - version +- name: com.github.openshift.api.config.v1.GCPPlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.GCPPlatformStatus + map: + fields: + - name: 
cloudLoadBalancerConfig + type: + namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig + default: + dnsType: PlatformDefault + - name: projectID + type: + scalar: string + default: "" + - name: region + type: + scalar: string + default: "" + - name: resourceLabels + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.GCPResourceLabel + elementRelationship: associative + keys: + - key + - name: resourceTags + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.GCPResourceTag + elementRelationship: associative + keys: + - key +- name: com.github.openshift.api.config.v1.GCPResourceLabel + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.GCPResourceTag + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: parentID + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.GitHubIdentityProvider + map: + fields: + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: clientID + type: + scalar: string + default: "" + - name: clientSecret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: hostname + type: + scalar: string + default: "" + - name: organizations + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: teams + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.GitLabIdentityProvider + map: + fields: + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: clientID + type: + scalar: string + default: "" + - name: clientSecret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.GoogleIdentityProvider + map: + fields: + - name: clientID + type: + scalar: string + default: "" + - name: clientSecret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: hostedDomain + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.HTPasswdIdentityProvider + map: + fields: + - name: fileData + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: com.github.openshift.api.config.v1.HubSource + map: + fields: + - name: disabled + type: + scalar: boolean + default: false + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.HubSourceStatus + map: + fields: + - name: message + type: + scalar: string + - name: status + type: + scalar: string +- name: com.github.openshift.api.config.v1.IBMCloudPlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.IBMCloudPlatformStatus + map: + fields: + - name: cisInstanceCRN + type: + scalar: string + - name: dnsInstanceCRN + type: + scalar: string + - name: location + type: + scalar: string + - name: providerType + type: + scalar: string + - name: resourceGroupName + type: + scalar: string + - name: serviceEndpoints + type: 
+ list: + elementType: + namedType: com.github.openshift.api.config.v1.IBMCloudServiceEndpoint + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.config.v1.IBMCloudServiceEndpoint + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.IdentityProvider + map: + fields: + - name: basicAuth + type: + namedType: com.github.openshift.api.config.v1.BasicAuthIdentityProvider + - name: github + type: + namedType: com.github.openshift.api.config.v1.GitHubIdentityProvider + - name: gitlab + type: + namedType: com.github.openshift.api.config.v1.GitLabIdentityProvider + - name: google + type: + namedType: com.github.openshift.api.config.v1.GoogleIdentityProvider + - name: htpasswd + type: + namedType: com.github.openshift.api.config.v1.HTPasswdIdentityProvider + - name: keystone + type: + namedType: com.github.openshift.api.config.v1.KeystoneIdentityProvider + - name: ldap + type: + namedType: com.github.openshift.api.config.v1.LDAPIdentityProvider + - name: mappingMethod + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: openID + type: + namedType: com.github.openshift.api.config.v1.OpenIDIdentityProvider + - name: requestHeader + type: + namedType: com.github.openshift.api.config.v1.RequestHeaderIdentityProvider + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Image + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ImageSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ImageStatus + default: {} +- name: com.github.openshift.api.config.v1.ImageContentPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ImageContentPolicySpec + default: {} +- name: com.github.openshift.api.config.v1.ImageContentPolicySpec + map: + fields: + - name: repositoryDigestMirrors + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.RepositoryDigestMirrors + elementRelationship: associative + keys: + - source +- name: com.github.openshift.api.config.v1.ImageDigestMirrorSet + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ImageDigestMirrorSetSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ImageDigestMirrorSetStatus + default: {} +- name: com.github.openshift.api.config.v1.ImageDigestMirrorSetSpec + map: + fields: + - name: imageDigestMirrors + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ImageDigestMirrors + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ImageDigestMirrorSetStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + 
elementRelationship: separable +- name: com.github.openshift.api.config.v1.ImageDigestMirrors + map: + fields: + - name: mirrorSourcePolicy + type: + scalar: string + - name: mirrors + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: source + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ImageLabel + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string +- name: com.github.openshift.api.config.v1.ImageSpec + map: + fields: + - name: additionalTrustedCA + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: allowedRegistriesForImport + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.RegistryLocation + elementRelationship: atomic + - name: externalRegistryHostnames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: imageStreamImportMode + type: + scalar: string + default: "" + - name: registrySources + type: + namedType: com.github.openshift.api.config.v1.RegistrySources + default: {} +- name: com.github.openshift.api.config.v1.ImageStatus + map: + fields: + - name: externalRegistryHostnames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: imageStreamImportMode + type: + scalar: string + - name: internalRegistryHostname + type: + scalar: string +- name: com.github.openshift.api.config.v1.ImageTagMirrorSet + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ImageTagMirrorSetSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ImageTagMirrorSetStatus + default: {} +- name: com.github.openshift.api.config.v1.ImageTagMirrorSetSpec + map: + fields: + - name: imageTagMirrors + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ImageTagMirrors + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ImageTagMirrorSetStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.ImageTagMirrors + map: + fields: + - name: mirrorSourcePolicy + type: + scalar: string + - name: mirrors + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: source + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Infrastructure + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.InfrastructureSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.InfrastructureStatus + default: {} +- name: com.github.openshift.api.config.v1.InfrastructureSpec + map: + fields: + - name: cloudConfig + type: + namedType: com.github.openshift.api.config.v1.ConfigMapFileReference + default: {} + - name: platformSpec + type: + namedType: com.github.openshift.api.config.v1.PlatformSpec + default: {} +- name: 
com.github.openshift.api.config.v1.InfrastructureStatus + map: + fields: + - name: apiServerInternalURI + type: + scalar: string + default: "" + - name: apiServerURL + type: + scalar: string + default: "" + - name: controlPlaneTopology + type: + scalar: string + default: "" + - name: cpuPartitioning + type: + scalar: string + default: None + - name: etcdDiscoveryDomain + type: + scalar: string + default: "" + - name: infrastructureName + type: + scalar: string + default: "" + - name: infrastructureTopology + type: + scalar: string + default: "" + - name: platform + type: + scalar: string + - name: platformStatus + type: + namedType: com.github.openshift.api.config.v1.PlatformStatus +- name: com.github.openshift.api.config.v1.Ingress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.IngressSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.IngressStatus + default: {} +- name: com.github.openshift.api.config.v1.IngressPlatformSpec + map: + fields: + - name: aws + type: + namedType: com.github.openshift.api.config.v1.AWSIngressSpec + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: aws + discriminatorValue: AWS +- name: com.github.openshift.api.config.v1.IngressSpec + map: + fields: + - name: appsDomain + type: + scalar: string + - name: componentRoutes + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ComponentRouteSpec + elementRelationship: associative + keys: + - namespace + - name + - name: domain + type: + scalar: string + default: "" + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.LoadBalancer + default: {} + - name: requiredHSTSPolicies + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.RequiredHSTSPolicy + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.IngressStatus + map: + fields: + - name: componentRoutes + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ComponentRouteStatus + elementRelationship: associative + keys: + - namespace + - name + - name: defaultPlacement + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.IntermediateTLSProfile + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.KeystoneIdentityProvider + map: + fields: + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: domainName + type: + scalar: string + default: "" + - name: tlsClientCert + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: tlsClientKey + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.KubevirtPlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: 
com.github.openshift.api.config.v1.KubevirtPlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: ingressIP + type: + scalar: string +- name: com.github.openshift.api.config.v1.LDAPAttributeMapping + map: + fields: + - name: email + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: id + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: name + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: preferredUsername + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.LDAPIdentityProvider + map: + fields: + - name: attributes + type: + namedType: com.github.openshift.api.config.v1.LDAPAttributeMapping + default: {} + - name: bindDN + type: + scalar: string + default: "" + - name: bindPassword + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: insecure + type: + scalar: boolean + default: false + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.LoadBalancer + map: + fields: + - name: platform + type: + namedType: com.github.openshift.api.config.v1.IngressPlatformSpec + default: {} +- name: com.github.openshift.api.config.v1.MTUMigration + map: + fields: + - name: machine + type: + namedType: com.github.openshift.api.config.v1.MTUMigrationValues + - name: network + type: + namedType: com.github.openshift.api.config.v1.MTUMigrationValues +- name: com.github.openshift.api.config.v1.MTUMigrationValues + map: + fields: + - name: from + type: + scalar: numeric + - name: to + type: + scalar: numeric +- name: com.github.openshift.api.config.v1.MaxAgePolicy + map: + fields: + - name: largestMaxAge + type: + scalar: numeric + - name: smallestMaxAge + type: + scalar: numeric +- name: com.github.openshift.api.config.v1.ModernTLSProfile + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.Network + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.NetworkSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.NetworkStatus + default: {} +- name: com.github.openshift.api.config.v1.NetworkDiagnostics + map: + fields: + - name: mode + type: + scalar: string + default: "" + - name: sourcePlacement + type: + namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement + default: {} + - name: targetPlacement + type: + namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement + default: {} +- name: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement + map: + fields: + - name: nodeSelector + type: + map: + 
elementType: + scalar: string + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.NetworkMigration + map: + fields: + - name: mtu + type: + namedType: com.github.openshift.api.config.v1.MTUMigration + - name: networkType + type: + scalar: string +- name: com.github.openshift.api.config.v1.NetworkSpec + map: + fields: + - name: clusterNetwork + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ClusterNetworkEntry + elementRelationship: atomic + - name: externalIP + type: + namedType: com.github.openshift.api.config.v1.ExternalIPConfig + - name: networkDiagnostics + type: + namedType: com.github.openshift.api.config.v1.NetworkDiagnostics + default: {} + - name: networkType + type: + scalar: string + default: "" + - name: serviceNetwork + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: serviceNodePortRange + type: + scalar: string +- name: com.github.openshift.api.config.v1.NetworkStatus + map: + fields: + - name: clusterNetwork + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ClusterNetworkEntry + elementRelationship: atomic + - name: clusterNetworkMTU + type: + scalar: numeric + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: migration + type: + namedType: com.github.openshift.api.config.v1.NetworkMigration + - name: networkType + type: + scalar: string + - name: serviceNetwork + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.Node + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.NodeSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.NodeStatus + default: {} +- name: com.github.openshift.api.config.v1.NodeSpec + map: + fields: + - name: cgroupMode + type: + scalar: string + - name: minimumKubeletVersion + type: + scalar: string + default: "" + - name: workerLatencyProfile + type: + scalar: string +- name: com.github.openshift.api.config.v1.NodeStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type +- name: com.github.openshift.api.config.v1.NutanixFailureDomain + map: + fields: + - name: cluster + type: + namedType: com.github.openshift.api.config.v1.NutanixResourceIdentifier + default: {} + - name: name + type: + scalar: string + default: "" + - name: subnets + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.NutanixResourceIdentifier + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.NutanixPlatformSpec + map: + fields: + - name: failureDomains + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.NutanixFailureDomain + elementRelationship: associative + keys: + - name + - name: prismCentral + type: + namedType: 
com.github.openshift.api.config.v1.NutanixPrismEndpoint + default: {} + - name: prismElements + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.NutanixPrismElementEndpoint + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.config.v1.NutanixPlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: ingressIP + type: + scalar: string + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer + default: + type: OpenShiftManagedDefault +- name: com.github.openshift.api.config.v1.NutanixPrismElementEndpoint + map: + fields: + - name: endpoint + type: + namedType: com.github.openshift.api.config.v1.NutanixPrismEndpoint + default: {} + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.NutanixPrismEndpoint + map: + fields: + - name: address + type: + scalar: string + default: "" + - name: port + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.config.v1.NutanixResourceIdentifier + map: + fields: + - name: name + type: + scalar: string + - name: type + type: + scalar: string + default: "" + - name: uuid + type: + scalar: string + unions: + - discriminator: type + fields: + - fieldName: name + discriminatorValue: Name + - fieldName: uuid + discriminatorValue: UUID +- name: com.github.openshift.api.config.v1.OAuth + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.OAuthSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.OAuthStatus + default: {} +- name: com.github.openshift.api.config.v1.OAuthSpec + map: + fields: + - name: identityProviders + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.IdentityProvider + elementRelationship: atomic + - name: templates + type: + namedType: com.github.openshift.api.config.v1.OAuthTemplates + default: {} + - name: tokenConfig + type: + namedType: com.github.openshift.api.config.v1.TokenConfig + default: {} +- name: com.github.openshift.api.config.v1.OAuthStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.OAuthTemplates + map: + fields: + - name: error + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: login + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: providerSelection + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: com.github.openshift.api.config.v1.OIDCClientConfig + map: + fields: + - name: clientID + type: + scalar: string + default: "" + - name: clientSecret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: componentName + type: + scalar: string + default: "" + - name: componentNamespace + type: + scalar: string + default: "" + - name: 
extraScopes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1.OIDCClientReference + map: + fields: + - name: clientID + type: + scalar: string + default: "" + - name: issuerURL + type: + scalar: string + default: "" + - name: oidcProviderName + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.OIDCClientStatus + map: + fields: + - name: componentName + type: + scalar: string + default: "" + - name: componentNamespace + type: + scalar: string + default: "" + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: consumingUsers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: currentOIDCClients + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.OIDCClientReference + elementRelationship: associative + keys: + - issuerURL + - clientID +- name: com.github.openshift.api.config.v1.OIDCProvider + map: + fields: + - name: claimMappings + type: + namedType: com.github.openshift.api.config.v1.TokenClaimMappings + default: {} + - name: claimValidationRules + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.TokenClaimValidationRule + elementRelationship: atomic + - name: issuer + type: + namedType: com.github.openshift.api.config.v1.TokenIssuer + default: {} + - name: name + type: + scalar: string + default: "" + - name: oidcClients + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.OIDCClientConfig + elementRelationship: associative + keys: + - componentNamespace + - componentName +- name: com.github.openshift.api.config.v1.ObjectReference + map: + fields: + - name: group + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.OldTLSProfile + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.OpenIDClaims + map: + fields: + - name: email + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: groups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: name + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: preferredUsername + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.OpenIDIdentityProvider + map: + fields: + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: claims + type: + namedType: com.github.openshift.api.config.v1.OpenIDClaims + default: {} + - name: clientID + type: + scalar: string + default: "" + - name: clientSecret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} + - name: extraAuthorizeParameters + type: + map: + elementType: + scalar: string + - name: extraScopes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: issuer + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer + map: + fields: 
+ - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.OpenStackPlatformSpec + map: + fields: + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: machineNetworks + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.OpenStackPlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: cloudName + type: + scalar: string + - name: ingressIP + type: + scalar: string + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer + default: + type: OpenShiftManagedDefault + - name: machineNetworks + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: nodeDNSIP + type: + scalar: string +- name: com.github.openshift.api.config.v1.OperandVersion + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: version + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.OperatorHub + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.OperatorHubSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.OperatorHubStatus + default: {} +- name: com.github.openshift.api.config.v1.OperatorHubSpec + map: + fields: + - name: disableAllDefaultSources + type: + scalar: boolean + - name: sources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.HubSource + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.OperatorHubStatus + map: + fields: + - name: sources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.HubSourceStatus + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.OvirtPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.OvirtPlatformSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.OvirtPlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: ingressIP + type: + scalar: string + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.OvirtPlatformLoadBalancer + default: + type: OpenShiftManagedDefault + - name: nodeDNSIP + type: + scalar: string +- name: com.github.openshift.api.config.v1.PlatformSpec + map: + fields: + - name: alibabaCloud + 
type: + namedType: com.github.openshift.api.config.v1.AlibabaCloudPlatformSpec + - name: aws + type: + namedType: com.github.openshift.api.config.v1.AWSPlatformSpec + - name: azure + type: + namedType: com.github.openshift.api.config.v1.AzurePlatformSpec + - name: baremetal + type: + namedType: com.github.openshift.api.config.v1.BareMetalPlatformSpec + - name: equinixMetal + type: + namedType: com.github.openshift.api.config.v1.EquinixMetalPlatformSpec + - name: external + type: + namedType: com.github.openshift.api.config.v1.ExternalPlatformSpec + - name: gcp + type: + namedType: com.github.openshift.api.config.v1.GCPPlatformSpec + - name: ibmcloud + type: + namedType: com.github.openshift.api.config.v1.IBMCloudPlatformSpec + - name: kubevirt + type: + namedType: com.github.openshift.api.config.v1.KubevirtPlatformSpec + - name: nutanix + type: + namedType: com.github.openshift.api.config.v1.NutanixPlatformSpec + - name: openstack + type: + namedType: com.github.openshift.api.config.v1.OpenStackPlatformSpec + - name: ovirt + type: + namedType: com.github.openshift.api.config.v1.OvirtPlatformSpec + - name: powervs + type: + namedType: com.github.openshift.api.config.v1.PowerVSPlatformSpec + - name: type + type: + scalar: string + default: "" + - name: vsphere + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformSpec +- name: com.github.openshift.api.config.v1.PlatformStatus + map: + fields: + - name: alibabaCloud + type: + namedType: com.github.openshift.api.config.v1.AlibabaCloudPlatformStatus + - name: aws + type: + namedType: com.github.openshift.api.config.v1.AWSPlatformStatus + - name: azure + type: + namedType: com.github.openshift.api.config.v1.AzurePlatformStatus + - name: baremetal + type: + namedType: com.github.openshift.api.config.v1.BareMetalPlatformStatus + - name: equinixMetal + type: + namedType: com.github.openshift.api.config.v1.EquinixMetalPlatformStatus + - name: external + type: + namedType: com.github.openshift.api.config.v1.ExternalPlatformStatus + - name: gcp + type: + namedType: com.github.openshift.api.config.v1.GCPPlatformStatus + - name: ibmcloud + type: + namedType: com.github.openshift.api.config.v1.IBMCloudPlatformStatus + - name: kubevirt + type: + namedType: com.github.openshift.api.config.v1.KubevirtPlatformStatus + - name: nutanix + type: + namedType: com.github.openshift.api.config.v1.NutanixPlatformStatus + - name: openstack + type: + namedType: com.github.openshift.api.config.v1.OpenStackPlatformStatus + - name: ovirt + type: + namedType: com.github.openshift.api.config.v1.OvirtPlatformStatus + - name: powervs + type: + namedType: com.github.openshift.api.config.v1.PowerVSPlatformStatus + - name: type + type: + scalar: string + default: "" + - name: vsphere + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformStatus +- name: com.github.openshift.api.config.v1.PowerVSPlatformSpec + map: + fields: + - name: serviceEndpoints + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.PowerVSServiceEndpoint + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.config.v1.PowerVSPlatformStatus + map: + fields: + - name: cisInstanceCRN + type: + scalar: string + - name: dnsInstanceCRN + type: + scalar: string + - name: region + type: + scalar: string + default: "" + - name: resourceGroup + type: + scalar: string + default: "" + - name: serviceEndpoints + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.PowerVSServiceEndpoint + elementRelationship: 
associative + keys: + - name + - name: zone + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.PowerVSServiceEndpoint + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: url + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.PrefixedClaimMapping + map: + fields: + - name: claim + type: + scalar: string + default: "" + - name: prefix + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ProfileCustomizations + map: + fields: + - name: dynamicResourceAllocation + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Project + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ProjectSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ProjectStatus + default: {} +- name: com.github.openshift.api.config.v1.ProjectSpec + map: + fields: + - name: projectRequestMessage + type: + scalar: string + default: "" + - name: projectRequestTemplate + type: + namedType: com.github.openshift.api.config.v1.TemplateReference + default: {} +- name: com.github.openshift.api.config.v1.ProjectStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.PromQLClusterCondition + map: + fields: + - name: promql + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Proxy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ProxySpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ProxyStatus + default: {} +- name: com.github.openshift.api.config.v1.ProxySpec + map: + fields: + - name: httpProxy + type: + scalar: string + - name: httpsProxy + type: + scalar: string + - name: noProxy + type: + scalar: string + - name: readinessEndpoints + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: trustedCA + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} +- name: com.github.openshift.api.config.v1.ProxyStatus + map: + fields: + - name: httpProxy + type: + scalar: string + - name: httpsProxy + type: + scalar: string + - name: noProxy + type: + scalar: string +- name: com.github.openshift.api.config.v1.RegistryLocation + map: + fields: + - name: domainName + type: + scalar: string + default: "" + - name: insecure + type: + scalar: boolean +- name: com.github.openshift.api.config.v1.RegistrySources + map: + fields: + - name: allowedRegistries + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: blockedRegistries + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: containerRuntimeSearchRegistries + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: insecureRegistries + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: 
com.github.openshift.api.config.v1.Release + map: + fields: + - name: architecture + type: + scalar: string + - name: channels + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: image + type: + scalar: string + default: "" + - name: url + type: + scalar: string + - name: version + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.RepositoryDigestMirrors + map: + fields: + - name: allowMirrorByTags + type: + scalar: boolean + - name: mirrors + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: source + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.RequestHeaderIdentityProvider + map: + fields: + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: challengeURL + type: + scalar: string + default: "" + - name: clientCommonNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: emailHeaders + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: headers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: loginURL + type: + scalar: string + default: "" + - name: nameHeaders + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: preferredUsernameHeaders + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.RequiredHSTSPolicy + map: + fields: + - name: domainPatterns + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: includeSubDomainsPolicy + type: + scalar: string + - name: maxAge + type: + namedType: com.github.openshift.api.config.v1.MaxAgePolicy + default: {} + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: preloadPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1.Scheduler + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.SchedulerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.SchedulerStatus + default: {} +- name: com.github.openshift.api.config.v1.SchedulerSpec + map: + fields: + - name: defaultNodeSelector + type: + scalar: string + - name: mastersSchedulable + type: + scalar: boolean + default: false + - name: policy + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: profile + type: + scalar: string + - name: profileCustomizations + type: + namedType: com.github.openshift.api.config.v1.ProfileCustomizations + default: {} +- name: com.github.openshift.api.config.v1.SchedulerStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.SecretNameReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.SignatureStore + map: + fields: + - name: ca + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: url + type: + scalar: string + 
default: "" +- name: com.github.openshift.api.config.v1.TLSSecurityProfile + map: + fields: + - name: custom + type: + namedType: com.github.openshift.api.config.v1.CustomTLSProfile + - name: intermediate + type: + namedType: com.github.openshift.api.config.v1.IntermediateTLSProfile + - name: modern + type: + namedType: com.github.openshift.api.config.v1.ModernTLSProfile + - name: old + type: + namedType: com.github.openshift.api.config.v1.OldTLSProfile + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: custom + discriminatorValue: Custom + - fieldName: intermediate + discriminatorValue: Intermediate + - fieldName: modern + discriminatorValue: Modern + - fieldName: old + discriminatorValue: Old +- name: com.github.openshift.api.config.v1.TemplateReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.TokenClaimMappings + map: + fields: + - name: groups + type: + namedType: com.github.openshift.api.config.v1.PrefixedClaimMapping + default: {} + - name: username + type: + namedType: com.github.openshift.api.config.v1.UsernameClaimMapping + default: {} +- name: com.github.openshift.api.config.v1.TokenClaimValidationRule + map: + fields: + - name: requiredClaim + type: + namedType: com.github.openshift.api.config.v1.TokenRequiredClaim + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.TokenConfig + map: + fields: + - name: accessTokenInactivityTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: accessTokenInactivityTimeoutSeconds + type: + scalar: numeric + - name: accessTokenMaxAgeSeconds + type: + scalar: numeric +- name: com.github.openshift.api.config.v1.TokenIssuer + map: + fields: + - name: audiences + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: issuerCertificateAuthority + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: issuerURL + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.TokenRequiredClaim + map: + fields: + - name: claim + type: + scalar: string + default: "" + - name: requiredValue + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.Update + map: + fields: + - name: architecture + type: + scalar: string + default: "" + - name: force + type: + scalar: boolean + default: false + - name: image + type: + scalar: string + default: "" + - name: version + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.UpdateHistory + map: + fields: + - name: acceptedRisks + type: + scalar: string + - name: completionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: image + type: + scalar: string + default: "" + - name: startedTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: state + type: + scalar: string + default: "" + - name: verified + type: + scalar: boolean + default: false + - name: version + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.UsernameClaimMapping + map: + fields: + - name: claim + type: + scalar: string + default: "" + - name: prefix + type: + namedType: com.github.openshift.api.config.v1.UsernamePrefix + - name: prefixPolicy + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.UsernamePrefix + map: + fields: + - name: prefixString + type: + scalar: string + 
default: "" +- name: com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup + map: + fields: + - name: hostGroup + type: + scalar: string + default: "" + - name: vmGroup + type: + scalar: string + default: "" + - name: vmHostRule + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity + map: + fields: + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity + map: + fields: + - name: hostGroup + type: + namedType: com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: hostGroup + discriminatorValue: HostGroup +- name: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: region + type: + scalar: string + default: "" + - name: regionAffinity + type: + namedType: com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity + - name: server + type: + scalar: string + default: "" + - name: topology + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformTopology + default: {} + - name: zone + type: + scalar: string + default: "" + - name: zoneAffinity + type: + namedType: com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity +- name: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking + map: + fields: + - name: external + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec + default: {} + - name: internal + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec + default: {} +- name: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec + map: + fields: + - name: excludeNetworkSubnetCidr + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: network + type: + scalar: string + - name: networkSubnetCidr + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1.VSpherePlatformSpec + map: + fields: + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: failureDomains + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec + elementRelationship: associative + keys: + - name + - name: ingressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: machineNetworks + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: nodeNetworking + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking + default: {} + - name: vcenters + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.VSpherePlatformStatus + map: + fields: + - name: apiServerInternalIP + type: + scalar: string + - name: apiServerInternalIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: ingressIP + type: + scalar: string + - name: ingressIPs + type: + 
list: + elementType: + scalar: string + elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer + default: + type: OpenShiftManagedDefault + - name: machineNetworks + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: nodeDNSIP + type: + scalar: string +- name: com.github.openshift.api.config.v1.VSpherePlatformTopology + map: + fields: + - name: computeCluster + type: + scalar: string + default: "" + - name: datacenter + type: + scalar: string + default: "" + - name: datastore + type: + scalar: string + default: "" + - name: folder + type: + scalar: string + - name: networks + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourcePool + type: + scalar: string + - name: template + type: + scalar: string +- name: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec + map: + fields: + - name: datacenters + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: port + type: + scalar: numeric + - name: server + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.WebhookTokenAuthenticator + map: + fields: + - name: kubeConfig + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: com.github.openshift.api.config.v1alpha1.Backup + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1alpha1.BackupSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1alpha1.BackupStatus + default: {} +- name: com.github.openshift.api.config.v1alpha1.BackupSpec + map: + fields: + - name: etcd + type: + namedType: com.github.openshift.api.config.v1alpha1.EtcdBackupSpec + default: {} +- name: com.github.openshift.api.config.v1alpha1.BackupStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1alpha1.ClusterImagePolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1alpha1.ClusterImagePolicySpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1alpha1.ClusterImagePolicyStatus + default: {} +- name: com.github.openshift.api.config.v1alpha1.ClusterImagePolicySpec + map: + fields: + - name: policy + type: + namedType: com.github.openshift.api.config.v1alpha1.Policy + default: {} + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1alpha1.ClusterImagePolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type +- name: com.github.openshift.api.config.v1alpha1.EtcdBackupSpec + map: + fields: + - name: pvcName + type: + scalar: string + default: "" + - name: retentionPolicy + type: + namedType: 
com.github.openshift.api.config.v1alpha1.RetentionPolicy + default: {} + - name: schedule + type: + scalar: string + default: "" + - name: timeZone + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor + map: + fields: + - name: fulcioCAData + type: + scalar: string + - name: fulcioSubject + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject + default: {} + - name: rekorKeyData + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.GatherConfig + map: + fields: + - name: dataPolicy + type: + scalar: string + - name: disabledGatherers + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1alpha1.ImagePolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicySpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyStatus + default: {} +- name: com.github.openshift.api.config.v1alpha1.ImagePolicySpec + map: + fields: + - name: policy + type: + namedType: com.github.openshift.api.config.v1alpha1.Policy + default: {} + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type +- name: com.github.openshift.api.config.v1alpha1.InsightsDataGather + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1alpha1.InsightsDataGatherSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus + default: {} +- name: com.github.openshift.api.config.v1alpha1.InsightsDataGatherSpec + map: + fields: + - name: gatherConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.GatherConfig + default: {} +- name: com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1alpha1.Policy + map: + fields: + - name: rootOfTrust + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust + default: {} + - name: signedIdentity + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyIdentity + default: {} +- name: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject + map: + fields: + - name: oidcIssuer + type: + scalar: string + default: "" + - name: signedEmail + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha1.PolicyIdentity + map: + fields: + - name: exactRepository + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyMatchExactRepository + - name: matchPolicy + type: + scalar: string + default: "" + - name: remapIdentity + type: + namedType: 
com.github.openshift.api.config.v1alpha1.PolicyMatchRemapIdentity + unions: + - discriminator: matchPolicy + fields: + - fieldName: exactRepository + discriminatorValue: PolicyMatchExactRepository + - fieldName: remapIdentity + discriminatorValue: PolicyMatchRemapIdentity +- name: com.github.openshift.api.config.v1alpha1.PolicyMatchExactRepository + map: + fields: + - name: repository + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha1.PolicyMatchRemapIdentity + map: + fields: + - name: prefix + type: + scalar: string + default: "" + - name: signedPrefix + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust + map: + fields: + - name: fulcioCAWithRekor + type: + namedType: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor + - name: policyType + type: + scalar: string + default: "" + - name: publicKey + type: + namedType: com.github.openshift.api.config.v1alpha1.PublicKey + unions: + - discriminator: policyType + fields: + - fieldName: fulcioCAWithRekor + discriminatorValue: FulcioCAWithRekor + - fieldName: publicKey + discriminatorValue: PublicKey +- name: com.github.openshift.api.config.v1alpha1.PublicKey + map: + fields: + - name: keyData + type: + scalar: string + - name: rekorKeyData + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.RetentionNumberConfig + map: + fields: + - name: maxNumberOfBackups + type: + scalar: numeric +- name: com.github.openshift.api.config.v1alpha1.RetentionPolicy + map: + fields: + - name: retentionNumber + type: + namedType: com.github.openshift.api.config.v1alpha1.RetentionNumberConfig + - name: retentionSize + type: + namedType: com.github.openshift.api.config.v1alpha1.RetentionSizeConfig + - name: retentionType + type: + scalar: string + default: "" + unions: + - discriminator: retentionType + fields: + - fieldName: retentionNumber + discriminatorValue: RetentionNumber + - fieldName: retentionSize + discriminatorValue: RetentionSize +- name: com.github.openshift.api.config.v1alpha1.RetentionSizeConfig + map: + fields: + - name: maxSizeOfBackupsGb + type: + scalar: numeric +- name: io.k8s.api.core.v1.ConfigMapKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.EnvVar + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string + - name: valueFrom + type: + namedType: io.k8s.api.core.v1.EnvVarSource +- name: io.k8s.api.core.v1.EnvVarSource + map: + fields: + - name: configMapKeyRef + type: + namedType: io.k8s.api.core.v1.ConfigMapKeySelector + - name: fieldRef + type: + namedType: io.k8s.api.core.v1.ObjectFieldSelector + - name: resourceFieldRef + type: + namedType: io.k8s.api.core.v1.ResourceFieldSelector + - name: secretKeyRef + type: + namedType: io.k8s.api.core.v1.SecretKeySelector +- name: io.k8s.api.core.v1.ObjectFieldSelector + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: request + type: + scalar: string +- name: io.k8s.api.core.v1.ResourceFieldSelector + map: + fields: + - name: containerName + type: + scalar: string + - name: divisor + type: + namedType: 
io.k8s.apimachinery.pkg.api.resource.Quantity + - name: resource + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceRequirements + map: + fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.SecretKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.Toleration + map: + fields: + - name: effect + type: + scalar: string + - name: key + type: + scalar: string + - name: operator + type: + scalar: string + - name: tolerationSeconds + type: + scalar: numeric + - name: value + type: + scalar: string +- name: io.k8s.apimachinery.pkg.api.resource.Quantity + scalar: untyped +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + default: "" + - name: observedGeneration + type: + scalar: numeric + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + elementRelationship: atomic + - name: matchLabels + type: + map: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: 
generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.runtime.RawExtension + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go new file mode 100644 index 0000000000000..1e9f3f6d898fa --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go @@ -0,0 +1,466 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package applyconfigurations + +import ( + v1 "github.com/openshift/api/config/v1" + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. 
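+//
+// A minimal illustrative sketch of a hypothetical caller (not part of this
+// file); "Proxy" is one of the v1 kinds handled in the switch below, and the
+// identifiers reuse this file's own imports:
+//
+//	ac := ForKind(v1.SchemeGroupVersion.WithKind("Proxy"))
+//	if proxyAC, ok := ac.(*configv1.ProxyApplyConfiguration); ok {
+//		_ = proxyAC // populate fields, then pass to a server-side Apply call
+//	}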
+func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=config.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("AlibabaCloudPlatformStatus"): + return &configv1.AlibabaCloudPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AlibabaCloudResourceTag"): + return &configv1.AlibabaCloudResourceTagApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIServer"): + return &configv1.APIServerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIServerEncryption"): + return &configv1.APIServerEncryptionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIServerNamedServingCert"): + return &configv1.APIServerNamedServingCertApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIServerServingCerts"): + return &configv1.APIServerServingCertsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("APIServerSpec"): + return &configv1.APIServerSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Audit"): + return &configv1.AuditApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AuditCustomRule"): + return &configv1.AuditCustomRuleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Authentication"): + return &configv1.AuthenticationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AuthenticationSpec"): + return &configv1.AuthenticationSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AuthenticationStatus"): + return &configv1.AuthenticationStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AWSDNSSpec"): + return &configv1.AWSDNSSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AWSIngressSpec"): + return &configv1.AWSIngressSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AWSPlatformSpec"): + return &configv1.AWSPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AWSPlatformStatus"): + return &configv1.AWSPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AWSResourceTag"): + return &configv1.AWSResourceTagApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AWSServiceEndpoint"): + return &configv1.AWSServiceEndpointApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AzurePlatformStatus"): + return &configv1.AzurePlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("AzureResourceTag"): + return &configv1.AzureResourceTagApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BareMetalPlatformLoadBalancer"): + return &configv1.BareMetalPlatformLoadBalancerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BareMetalPlatformSpec"): + return &configv1.BareMetalPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BareMetalPlatformStatus"): + return &configv1.BareMetalPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BasicAuthIdentityProvider"): + return &configv1.BasicAuthIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Build"): + return &configv1.BuildApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BuildDefaults"): + return &configv1.BuildDefaultsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BuildOverrides"): + return &configv1.BuildOverridesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("BuildSpec"): + return &configv1.BuildSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("CloudControllerManagerStatus"): + return &configv1.CloudControllerManagerStatusApplyConfiguration{} + case 
v1.SchemeGroupVersion.WithKind("CloudLoadBalancerConfig"): + return &configv1.CloudLoadBalancerConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("CloudLoadBalancerIPs"): + return &configv1.CloudLoadBalancerIPsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterCondition"): + return &configv1.ClusterConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterNetworkEntry"): + return &configv1.ClusterNetworkEntryApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterOperator"): + return &configv1.ClusterOperatorApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterOperatorStatus"): + return &configv1.ClusterOperatorStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterOperatorStatusCondition"): + return &configv1.ClusterOperatorStatusConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterVersion"): + return &configv1.ClusterVersionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterVersionCapabilitiesSpec"): + return &configv1.ClusterVersionCapabilitiesSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterVersionCapabilitiesStatus"): + return &configv1.ClusterVersionCapabilitiesStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterVersionSpec"): + return &configv1.ClusterVersionSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterVersionStatus"): + return &configv1.ClusterVersionStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ComponentOverride"): + return &configv1.ComponentOverrideApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ComponentRouteSpec"): + return &configv1.ComponentRouteSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ComponentRouteStatus"): + return &configv1.ComponentRouteStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConditionalUpdate"): + return &configv1.ConditionalUpdateApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConditionalUpdateRisk"): + return &configv1.ConditionalUpdateRiskApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConfigMapFileReference"): + return &configv1.ConfigMapFileReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConfigMapNameReference"): + return &configv1.ConfigMapNameReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Console"): + return &configv1.ConsoleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConsoleAuthentication"): + return &configv1.ConsoleAuthenticationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConsoleSpec"): + return &configv1.ConsoleSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ConsoleStatus"): + return &configv1.ConsoleStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("CustomFeatureGates"): + return &configv1.CustomFeatureGatesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("CustomTLSProfile"): + return &configv1.CustomTLSProfileApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("DeprecatedWebhookTokenAuthenticator"): + return &configv1.DeprecatedWebhookTokenAuthenticatorApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("DNS"): + return &configv1.DNSApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("DNSPlatformSpec"): + return &configv1.DNSPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("DNSSpec"): + return &configv1.DNSSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("DNSZone"): + return 
&configv1.DNSZoneApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("EquinixMetalPlatformStatus"): + return &configv1.EquinixMetalPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ExternalIPConfig"): + return &configv1.ExternalIPConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ExternalIPPolicy"): + return &configv1.ExternalIPPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ExternalPlatformSpec"): + return &configv1.ExternalPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ExternalPlatformStatus"): + return &configv1.ExternalPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("FeatureGate"): + return &configv1.FeatureGateApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("FeatureGateAttributes"): + return &configv1.FeatureGateAttributesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("FeatureGateDetails"): + return &configv1.FeatureGateDetailsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("FeatureGateSelection"): + return &configv1.FeatureGateSelectionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("FeatureGateSpec"): + return &configv1.FeatureGateSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("FeatureGateStatus"): + return &configv1.FeatureGateStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GCPPlatformStatus"): + return &configv1.GCPPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GCPResourceLabel"): + return &configv1.GCPResourceLabelApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GCPResourceTag"): + return &configv1.GCPResourceTagApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GitHubIdentityProvider"): + return &configv1.GitHubIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GitLabIdentityProvider"): + return &configv1.GitLabIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GoogleIdentityProvider"): + return &configv1.GoogleIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("HTPasswdIdentityProvider"): + return &configv1.HTPasswdIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("HubSource"): + return &configv1.HubSourceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("HubSourceStatus"): + return &configv1.HubSourceStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IBMCloudPlatformStatus"): + return &configv1.IBMCloudPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IBMCloudServiceEndpoint"): + return &configv1.IBMCloudServiceEndpointApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IdentityProvider"): + return &configv1.IdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IdentityProviderConfig"): + return &configv1.IdentityProviderConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Image"): + return &configv1.ImageApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageContentPolicy"): + return &configv1.ImageContentPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageContentPolicySpec"): + return &configv1.ImageContentPolicySpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageDigestMirrors"): + return &configv1.ImageDigestMirrorsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageDigestMirrorSet"): + return &configv1.ImageDigestMirrorSetApplyConfiguration{} + case 
v1.SchemeGroupVersion.WithKind("ImageDigestMirrorSetSpec"): + return &configv1.ImageDigestMirrorSetSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageLabel"): + return &configv1.ImageLabelApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageSpec"): + return &configv1.ImageSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageStatus"): + return &configv1.ImageStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageTagMirrors"): + return &configv1.ImageTagMirrorsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageTagMirrorSet"): + return &configv1.ImageTagMirrorSetApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageTagMirrorSetSpec"): + return &configv1.ImageTagMirrorSetSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Infrastructure"): + return &configv1.InfrastructureApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("InfrastructureSpec"): + return &configv1.InfrastructureSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("InfrastructureStatus"): + return &configv1.InfrastructureStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Ingress"): + return &configv1.IngressApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IngressPlatformSpec"): + return &configv1.IngressPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IngressSpec"): + return &configv1.IngressSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IngressStatus"): + return &configv1.IngressStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("KeystoneIdentityProvider"): + return &configv1.KeystoneIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("KubevirtPlatformStatus"): + return &configv1.KubevirtPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("LDAPAttributeMapping"): + return &configv1.LDAPAttributeMappingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("LDAPIdentityProvider"): + return &configv1.LDAPIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("LoadBalancer"): + return &configv1.LoadBalancerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MaxAgePolicy"): + return &configv1.MaxAgePolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MTUMigration"): + return &configv1.MTUMigrationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MTUMigrationValues"): + return &configv1.MTUMigrationValuesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Network"): + return &configv1.NetworkApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkDiagnostics"): + return &configv1.NetworkDiagnosticsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkDiagnosticsSourcePlacement"): + return &configv1.NetworkDiagnosticsSourcePlacementApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkDiagnosticsTargetPlacement"): + return &configv1.NetworkDiagnosticsTargetPlacementApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkMigration"): + return &configv1.NetworkMigrationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkSpec"): + return &configv1.NetworkSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NetworkStatus"): + return &configv1.NetworkStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Node"): + return &configv1.NodeApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NodeSpec"): + return &configv1.NodeSpecApplyConfiguration{} + 
case v1.SchemeGroupVersion.WithKind("NodeStatus"): + return &configv1.NodeStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixFailureDomain"): + return &configv1.NutanixFailureDomainApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixPlatformLoadBalancer"): + return &configv1.NutanixPlatformLoadBalancerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixPlatformSpec"): + return &configv1.NutanixPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixPlatformStatus"): + return &configv1.NutanixPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixPrismElementEndpoint"): + return &configv1.NutanixPrismElementEndpointApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixPrismEndpoint"): + return &configv1.NutanixPrismEndpointApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NutanixResourceIdentifier"): + return &configv1.NutanixResourceIdentifierApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OAuth"): + return &configv1.OAuthApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OAuthRemoteConnectionInfo"): + return &configv1.OAuthRemoteConnectionInfoApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OAuthSpec"): + return &configv1.OAuthSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OAuthTemplates"): + return &configv1.OAuthTemplatesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ObjectReference"): + return &configv1.ObjectReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OIDCClientConfig"): + return &configv1.OIDCClientConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OIDCClientReference"): + return &configv1.OIDCClientReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OIDCClientStatus"): + return &configv1.OIDCClientStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OIDCProvider"): + return &configv1.OIDCProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OpenIDClaims"): + return &configv1.OpenIDClaimsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OpenIDIdentityProvider"): + return &configv1.OpenIDIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OpenStackPlatformLoadBalancer"): + return &configv1.OpenStackPlatformLoadBalancerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OpenStackPlatformSpec"): + return &configv1.OpenStackPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OpenStackPlatformStatus"): + return &configv1.OpenStackPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OperandVersion"): + return &configv1.OperandVersionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OperatorHub"): + return &configv1.OperatorHubApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OperatorHubSpec"): + return &configv1.OperatorHubSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OperatorHubStatus"): + return &configv1.OperatorHubStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OvirtPlatformLoadBalancer"): + return &configv1.OvirtPlatformLoadBalancerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OvirtPlatformStatus"): + return &configv1.OvirtPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PlatformSpec"): + return &configv1.PlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PlatformStatus"): + return 
&configv1.PlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PowerVSPlatformSpec"): + return &configv1.PowerVSPlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PowerVSPlatformStatus"): + return &configv1.PowerVSPlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PowerVSServiceEndpoint"): + return &configv1.PowerVSServiceEndpointApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PrefixedClaimMapping"): + return &configv1.PrefixedClaimMappingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ProfileCustomizations"): + return &configv1.ProfileCustomizationsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Project"): + return &configv1.ProjectApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ProjectSpec"): + return &configv1.ProjectSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PromQLClusterCondition"): + return &configv1.PromQLClusterConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Proxy"): + return &configv1.ProxyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ProxySpec"): + return &configv1.ProxySpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ProxyStatus"): + return &configv1.ProxyStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RegistryLocation"): + return &configv1.RegistryLocationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RegistrySources"): + return &configv1.RegistrySourcesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Release"): + return &configv1.ReleaseApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RepositoryDigestMirrors"): + return &configv1.RepositoryDigestMirrorsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RequestHeaderIdentityProvider"): + return &configv1.RequestHeaderIdentityProviderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RequiredHSTSPolicy"): + return &configv1.RequiredHSTSPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Scheduler"): + return &configv1.SchedulerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SchedulerSpec"): + return &configv1.SchedulerSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SecretNameReference"): + return &configv1.SecretNameReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SignatureStore"): + return &configv1.SignatureStoreApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TemplateReference"): + return &configv1.TemplateReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TLSProfileSpec"): + return &configv1.TLSProfileSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TLSSecurityProfile"): + return &configv1.TLSSecurityProfileApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TokenClaimMapping"): + return &configv1.TokenClaimMappingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TokenClaimMappings"): + return &configv1.TokenClaimMappingsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TokenClaimValidationRule"): + return &configv1.TokenClaimValidationRuleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TokenConfig"): + return &configv1.TokenConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TokenIssuer"): + return &configv1.TokenIssuerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TokenRequiredClaim"): + return &configv1.TokenRequiredClaimApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Update"): + return 
&configv1.UpdateApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("UpdateHistory"): + return &configv1.UpdateHistoryApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("UsernameClaimMapping"): + return &configv1.UsernameClaimMappingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("UsernamePrefix"): + return &configv1.UsernamePrefixApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainHostGroup"): + return &configv1.VSphereFailureDomainHostGroupApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainRegionAffinity"): + return &configv1.VSphereFailureDomainRegionAffinityApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainZoneAffinity"): + return &configv1.VSphereFailureDomainZoneAffinityApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformFailureDomainSpec"): + return &configv1.VSpherePlatformFailureDomainSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformLoadBalancer"): + return &configv1.VSpherePlatformLoadBalancerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformNodeNetworking"): + return &configv1.VSpherePlatformNodeNetworkingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformNodeNetworkingSpec"): + return &configv1.VSpherePlatformNodeNetworkingSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformSpec"): + return &configv1.VSpherePlatformSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformStatus"): + return &configv1.VSpherePlatformStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformTopology"): + return &configv1.VSpherePlatformTopologyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSpherePlatformVCenterSpec"): + return &configv1.VSpherePlatformVCenterSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("WebhookTokenAuthenticator"): + return &configv1.WebhookTokenAuthenticatorApplyConfiguration{} + + // Group=config.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("Backup"): + return &configv1alpha1.BackupApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("BackupSpec"): + return &configv1alpha1.BackupSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicy"): + return &configv1alpha1.ClusterImagePolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicySpec"): + return &configv1alpha1.ClusterImagePolicySpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicyStatus"): + return &configv1alpha1.ClusterImagePolicyStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("EtcdBackupSpec"): + return &configv1alpha1.EtcdBackupSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("FulcioCAWithRekor"): + return &configv1alpha1.FulcioCAWithRekorApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("GatherConfig"): + return &configv1alpha1.GatherConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicy"): + return &configv1alpha1.ImagePolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicySpec"): + return &configv1alpha1.ImagePolicySpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyStatus"): + return &configv1alpha1.ImagePolicyStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGather"): + return 
&configv1alpha1.InsightsDataGatherApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGatherSpec"): + return &configv1alpha1.InsightsDataGatherSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Policy"): + return &configv1alpha1.PolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("PolicyFulcioSubject"): + return &configv1alpha1.PolicyFulcioSubjectApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("PolicyIdentity"): + return &configv1alpha1.PolicyIdentityApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("PolicyMatchExactRepository"): + return &configv1alpha1.PolicyMatchExactRepositoryApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("PolicyMatchRemapIdentity"): + return &configv1alpha1.PolicyMatchRemapIdentityApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("PolicyRootOfTrust"): + return &configv1alpha1.PolicyRootOfTrustApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("PublicKey"): + return &configv1alpha1.PublicKeyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("RetentionNumberConfig"): + return &configv1alpha1.RetentionNumberConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("RetentionPolicy"): + return &configv1alpha1.RetentionPolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("RetentionSizeConfig"): + return &configv1alpha1.RetentionSizeConfigApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..f9ed357b64055 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ConfigV1() configv1.ConfigV1Interface + ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + configV1 *configv1.ConfigV1Client + configV1alpha1 *configv1alpha1.ConfigV1alpha1Client +} + +// ConfigV1 retrieves the ConfigV1Client +func (c *Clientset) ConfigV1() configv1.ConfigV1Interface { + return c.configV1 +} + +// ConfigV1alpha1 retrieves the ConfigV1alpha1Client +func (c *Clientset) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface { + return c.configV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.configV1, err = configv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.configV1alpha1, err = configv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.configV1 = configv1.New(c) + cs.configV1alpha1 = configv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000000..ddf12da1e7ecd --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,113 @@ +// Code generated by client-gen. DO NOT EDIT. 
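For reference, the clientset defined in clientset.go above is typically built from a rest.Config. The sketch below is illustrative only, not part of this change: the kubeconfig path is a placeholder, and the Infrastructures() getter is assumed from the typed ConfigV1 client vendored elsewhere in this series.

package main

import (
	"context"
	"fmt"

	versioned "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a rest.Config from a kubeconfig file (path is a placeholder).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig shallow-copies cfg and shares one HTTP transport across
	// the ConfigV1 and ConfigV1alpha1 clients, as shown above.
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Cluster-scoped config.openshift.io resources are conventionally
	// singletons named "cluster".
	infra, err := cs.ConfigV1().Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(infra.Status.InfrastructureName)
}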
+ +package fake + +import ( + applyconfigurations "github.com/openshift/client-go/config/applyconfigurations" + clientset "github.com/openshift/client-go/config/clientset/versioned" + configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + fakeconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake" + configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + fakeconfigv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ConfigV1 retrieves the ConfigV1Client +func (c *Clientset) ConfigV1() configv1.ConfigV1Interface { + return &fakeconfigv1.FakeConfigV1{Fake: &c.Fake} +} + +// ConfigV1alpha1 retrieves the ConfigV1alpha1Client +func (c *Clientset) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface { + return &fakeconfigv1alpha1.FakeConfigV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000000..3630ed1cd17db --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go new file mode 100644 index 0000000000000..7489301098461 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + configv1 "github.com/openshift/api/config/v1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + configv1.AddToScheme, + configv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
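Editor's note: the fake package above ships both trackers, so a test can reach for NewClientset when it needs field-managed (server-side apply) semantics and NewSimpleClientset otherwise. A hedged sketch of a unit test follows; the Infrastructure singleton name "cluster" is the OpenShift convention, and the test name is illustrative.

package fake_test

import (
	"context"
	"testing"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetInfrastructure(t *testing.T) {
	// Seed the field-managed tracker with an existing object.
	infra := &configv1.Infrastructure{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}
	cs := fake.NewClientset(infra)

	got, err := cs.ConfigV1().Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "cluster" {
		t.Fatalf("got %q, want %q", got.Name, "cluster")
	}
}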
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..6340555dd1d46 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + configv1 "github.com/openshift/api/config/v1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + configv1.AddToScheme, + configv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go new file mode 100644 index 0000000000000..20e56733a6fe7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// APIServersGetter has a method to return a APIServerInterface. +// A group's client should implement this interface. 
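Editor's note: the AddToScheme composition described in the package comments above amounts to one registration call in practice. A minimal sketch, assuming client-go's global scheme is the intended target:

package main

import (
	configscheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"
)

func init() {
	// Register config.openshift.io/v1 and v1alpha1 types into the shared
	// kubernetes scheme so RawExtensions holding them serialize correctly.
	utilruntime.Must(configscheme.AddToScheme(clientsetscheme.Scheme))
}

func main() {}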
+type APIServersGetter interface { + APIServers() APIServerInterface +} + +// APIServerInterface has methods to work with APIServer resources. +type APIServerInterface interface { + Create(ctx context.Context, aPIServer *configv1.APIServer, opts metav1.CreateOptions) (*configv1.APIServer, error) + Update(ctx context.Context, aPIServer *configv1.APIServer, opts metav1.UpdateOptions) (*configv1.APIServer, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, aPIServer *configv1.APIServer, opts metav1.UpdateOptions) (*configv1.APIServer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.APIServer, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.APIServerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.APIServer, err error) + Apply(ctx context.Context, aPIServer *applyconfigurationsconfigv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.APIServer, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, aPIServer *applyconfigurationsconfigv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.APIServer, err error) + APIServerExpansion +} + +// aPIServers implements APIServerInterface +type aPIServers struct { + *gentype.ClientWithListAndApply[*configv1.APIServer, *configv1.APIServerList, *applyconfigurationsconfigv1.APIServerApplyConfiguration] +} + +// newAPIServers returns a APIServers +func newAPIServers(c *ConfigV1Client) *aPIServers { + return &aPIServers{ + gentype.NewClientWithListAndApply[*configv1.APIServer, *configv1.APIServerList, *applyconfigurationsconfigv1.APIServerApplyConfiguration]( + "apiservers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.APIServer { return &configv1.APIServer{} }, + func() *configv1.APIServerList { return &configv1.APIServerList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go new file mode 100644 index 0000000000000..f2f9cae610113 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// AuthenticationsGetter has a method to return a AuthenticationInterface. +// A group's client should implement this interface. +type AuthenticationsGetter interface { + Authentications() AuthenticationInterface +} + +// AuthenticationInterface has methods to work with Authentication resources. 
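Editor's note: every typed client in this group follows the shape shown for APIServer above: cluster-scoped (note the empty namespace handed to gentype) with Create/Update/Delete/Get/List/Watch/Patch/Apply. A sketch of consuming it through the interface, assuming the caller already holds a ConfigV1Interface:

package example

import (
	"context"

	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listAPIServerNames lists the cluster-scoped APIServer objects (normally a
// single one named "cluster") and returns their names.
func listAPIServerNames(ctx context.Context, c configv1client.ConfigV1Interface) ([]string, error) {
	list, err := c.APIServers().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for i := range list.Items {
		names = append(names, list.Items[i].Name)
	}
	return names, nil
}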
+type AuthenticationInterface interface { + Create(ctx context.Context, authentication *configv1.Authentication, opts metav1.CreateOptions) (*configv1.Authentication, error) + Update(ctx context.Context, authentication *configv1.Authentication, opts metav1.UpdateOptions) (*configv1.Authentication, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, authentication *configv1.Authentication, opts metav1.UpdateOptions) (*configv1.Authentication, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Authentication, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.AuthenticationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Authentication, err error) + Apply(ctx context.Context, authentication *applyconfigurationsconfigv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Authentication, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, authentication *applyconfigurationsconfigv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Authentication, err error) + AuthenticationExpansion +} + +// authentications implements AuthenticationInterface +type authentications struct { + *gentype.ClientWithListAndApply[*configv1.Authentication, *configv1.AuthenticationList, *applyconfigurationsconfigv1.AuthenticationApplyConfiguration] +} + +// newAuthentications returns a Authentications +func newAuthentications(c *ConfigV1Client) *authentications { + return &authentications{ + gentype.NewClientWithListAndApply[*configv1.Authentication, *configv1.AuthenticationList, *applyconfigurationsconfigv1.AuthenticationApplyConfiguration]( + "authentications", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Authentication { return &configv1.Authentication{} }, + func() *configv1.AuthenticationList { return &configv1.AuthenticationList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go new file mode 100644 index 0000000000000..6e144b1f218d0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// BuildsGetter has a method to return a BuildInterface. +// A group's client should implement this interface. +type BuildsGetter interface { + Builds() BuildInterface +} + +// BuildInterface has methods to work with Build resources. 
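Editor's note: the Apply methods above take generated apply configurations rather than full objects, so a controller declares only the fields it owns. A hedged sketch of server-side apply against the Authentication singleton; the field manager name "example-manager" and the spec value are assumptions:

package example

import (
	"context"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyAuthenticationType asserts ownership of spec.type only; the server
// merges everything else under server-side apply semantics.
func applyAuthenticationType(ctx context.Context, c configv1client.ConfigV1Interface) error {
	ac := applyconfigv1.Authentication("cluster").
		WithSpec(applyconfigv1.AuthenticationSpec().
			WithType("IntegratedOAuth"))
	_, err := c.Authentications().Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "example-manager", // assumption: any stable manager name
		Force:        true,
	})
	return err
}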
+type BuildInterface interface { + Create(ctx context.Context, build *configv1.Build, opts metav1.CreateOptions) (*configv1.Build, error) + Update(ctx context.Context, build *configv1.Build, opts metav1.UpdateOptions) (*configv1.Build, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Build, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.BuildList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Build, err error) + Apply(ctx context.Context, build *applyconfigurationsconfigv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Build, err error) + BuildExpansion +} + +// builds implements BuildInterface +type builds struct { + *gentype.ClientWithListAndApply[*configv1.Build, *configv1.BuildList, *applyconfigurationsconfigv1.BuildApplyConfiguration] +} + +// newBuilds returns a Builds +func newBuilds(c *ConfigV1Client) *builds { + return &builds{ + gentype.NewClientWithListAndApply[*configv1.Build, *configv1.BuildList, *applyconfigurationsconfigv1.BuildApplyConfiguration]( + "builds", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Build { return &configv1.Build{} }, + func() *configv1.BuildList { return &configv1.BuildList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go new file mode 100644 index 0000000000000..a2f03a5020e91 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterOperatorsGetter has a method to return a ClusterOperatorInterface. +// A group's client should implement this interface. +type ClusterOperatorsGetter interface { + ClusterOperators() ClusterOperatorInterface +} + +// ClusterOperatorInterface has methods to work with ClusterOperator resources. +type ClusterOperatorInterface interface { + Create(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts metav1.CreateOptions) (*configv1.ClusterOperator, error) + Update(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts metav1.UpdateOptions) (*configv1.ClusterOperator, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts metav1.UpdateOptions) (*configv1.ClusterOperator, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ClusterOperator, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ClusterOperatorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ClusterOperator, err error) + Apply(ctx context.Context, clusterOperator *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterOperator, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterOperator *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterOperator, err error) + ClusterOperatorExpansion +} + +// clusterOperators implements ClusterOperatorInterface +type clusterOperators struct { + *gentype.ClientWithListAndApply[*configv1.ClusterOperator, *configv1.ClusterOperatorList, *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration] +} + +// newClusterOperators returns a ClusterOperators +func newClusterOperators(c *ConfigV1Client) *clusterOperators { + return &clusterOperators{ + gentype.NewClientWithListAndApply[*configv1.ClusterOperator, *configv1.ClusterOperatorList, *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration]( + "clusteroperators", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.ClusterOperator { return &configv1.ClusterOperator{} }, + func() *configv1.ClusterOperatorList { return &configv1.ClusterOperatorList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go new file mode 100644 index 0000000000000..cb03327d9b337 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterVersionsGetter has a method to return a ClusterVersionInterface. +// A group's client should implement this interface. +type ClusterVersionsGetter interface { + ClusterVersions() ClusterVersionInterface +} + +// ClusterVersionInterface has methods to work with ClusterVersion resources. 
+type ClusterVersionInterface interface { + Create(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts metav1.CreateOptions) (*configv1.ClusterVersion, error) + Update(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts metav1.UpdateOptions) (*configv1.ClusterVersion, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts metav1.UpdateOptions) (*configv1.ClusterVersion, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ClusterVersion, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ClusterVersionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ClusterVersion, err error) + Apply(ctx context.Context, clusterVersion *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterVersion, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterVersion *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterVersion, err error) + ClusterVersionExpansion +} + +// clusterVersions implements ClusterVersionInterface +type clusterVersions struct { + *gentype.ClientWithListAndApply[*configv1.ClusterVersion, *configv1.ClusterVersionList, *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration] +} + +// newClusterVersions returns a ClusterVersions +func newClusterVersions(c *ConfigV1Client) *clusterVersions { + return &clusterVersions{ + gentype.NewClientWithListAndApply[*configv1.ClusterVersion, *configv1.ClusterVersionList, *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration]( + "clusterversions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.ClusterVersion { return &configv1.ClusterVersion{} }, + func() *configv1.ClusterVersionList { return &configv1.ClusterVersionList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go new file mode 100644 index 0000000000000..bbb0b312ee7b4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -0,0 +1,191 @@ +// Code generated by client-gen. DO NOT EDIT. 
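Editor's note: the Watch method generated for ClusterVersion above is the natural way to follow upgrade progress. A sketch, assuming the conventional singleton name "version" and that context cancellation handles shutdown:

package example

import (
	"context"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// watchClusterVersion prints each event's desired version until the watch
// channel closes (e.g. when ctx is cancelled).
func watchClusterVersion(ctx context.Context, c configv1client.ConfigV1Interface) error {
	w, err := c.ClusterVersions().Watch(ctx, metav1.ListOptions{
		FieldSelector: "metadata.name=version",
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		if cv, ok := ev.Object.(*configv1.ClusterVersion); ok {
			fmt.Printf("%s: desired %s\n", ev.Type, cv.Status.Desired.Version)
		}
	}
	return nil
}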
+ +package v1 + +import ( + http "net/http" + + configv1 "github.com/openshift/api/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ConfigV1Interface interface { + RESTClient() rest.Interface + APIServersGetter + AuthenticationsGetter + BuildsGetter + ClusterOperatorsGetter + ClusterVersionsGetter + ConsolesGetter + DNSesGetter + FeatureGatesGetter + ImagesGetter + ImageContentPoliciesGetter + ImageDigestMirrorSetsGetter + ImageTagMirrorSetsGetter + InfrastructuresGetter + IngressesGetter + NetworksGetter + NodesGetter + OAuthsGetter + OperatorHubsGetter + ProjectsGetter + ProxiesGetter + SchedulersGetter +} + +// ConfigV1Client is used to interact with features provided by the config.openshift.io group. +type ConfigV1Client struct { + restClient rest.Interface +} + +func (c *ConfigV1Client) APIServers() APIServerInterface { + return newAPIServers(c) +} + +func (c *ConfigV1Client) Authentications() AuthenticationInterface { + return newAuthentications(c) +} + +func (c *ConfigV1Client) Builds() BuildInterface { + return newBuilds(c) +} + +func (c *ConfigV1Client) ClusterOperators() ClusterOperatorInterface { + return newClusterOperators(c) +} + +func (c *ConfigV1Client) ClusterVersions() ClusterVersionInterface { + return newClusterVersions(c) +} + +func (c *ConfigV1Client) Consoles() ConsoleInterface { + return newConsoles(c) +} + +func (c *ConfigV1Client) DNSes() DNSInterface { + return newDNSes(c) +} + +func (c *ConfigV1Client) FeatureGates() FeatureGateInterface { + return newFeatureGates(c) +} + +func (c *ConfigV1Client) Images() ImageInterface { + return newImages(c) +} + +func (c *ConfigV1Client) ImageContentPolicies() ImageContentPolicyInterface { + return newImageContentPolicies(c) +} + +func (c *ConfigV1Client) ImageDigestMirrorSets() ImageDigestMirrorSetInterface { + return newImageDigestMirrorSets(c) +} + +func (c *ConfigV1Client) ImageTagMirrorSets() ImageTagMirrorSetInterface { + return newImageTagMirrorSets(c) +} + +func (c *ConfigV1Client) Infrastructures() InfrastructureInterface { + return newInfrastructures(c) +} + +func (c *ConfigV1Client) Ingresses() IngressInterface { + return newIngresses(c) +} + +func (c *ConfigV1Client) Networks() NetworkInterface { + return newNetworks(c) +} + +func (c *ConfigV1Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *ConfigV1Client) OAuths() OAuthInterface { + return newOAuths(c) +} + +func (c *ConfigV1Client) OperatorHubs() OperatorHubInterface { + return newOperatorHubs(c) +} + +func (c *ConfigV1Client) Projects() ProjectInterface { + return newProjects(c) +} + +func (c *ConfigV1Client) Proxies() ProxyInterface { + return newProxies(c) +} + +func (c *ConfigV1Client) Schedulers() SchedulerInterface { + return newSchedulers(c) +} + +// NewForConfig creates a new ConfigV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ConfigV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ConfigV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ConfigV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ConfigV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ConfigV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ConfigV1Client for the given RESTClient. +func New(c rest.Interface) *ConfigV1Client { + return &ConfigV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := configv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ConfigV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go new file mode 100644 index 0000000000000..ead87be189eee --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ConsolesGetter has a method to return a ConsoleInterface. +// A group's client should implement this interface. +type ConsolesGetter interface { + Consoles() ConsoleInterface +} + +// ConsoleInterface has methods to work with Console resources. +type ConsoleInterface interface { + Create(ctx context.Context, console *configv1.Console, opts metav1.CreateOptions) (*configv1.Console, error) + Update(ctx context.Context, console *configv1.Console, opts metav1.UpdateOptions) (*configv1.Console, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
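Editor's note: when only the config.openshift.io/v1 group is needed, the group client above can be built directly instead of the whole clientset; setConfigDefaults fills in the group version, the /apis path, and the negotiated serializer. A minimal in-cluster sketch:

package example

import (
	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	"k8s.io/client-go/rest"
)

// newConfigV1 builds just the ConfigV1 group client from the pod's
// in-cluster service-account credentials.
func newConfigV1() (*configv1client.ConfigV1Client, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return configv1client.NewForConfig(cfg)
}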
+ UpdateStatus(ctx context.Context, console *configv1.Console, opts metav1.UpdateOptions) (*configv1.Console, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Console, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ConsoleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Console, err error) + Apply(ctx context.Context, console *applyconfigurationsconfigv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Console, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, console *applyconfigurationsconfigv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Console, err error) + ConsoleExpansion +} + +// consoles implements ConsoleInterface +type consoles struct { + *gentype.ClientWithListAndApply[*configv1.Console, *configv1.ConsoleList, *applyconfigurationsconfigv1.ConsoleApplyConfiguration] +} + +// newConsoles returns a Consoles +func newConsoles(c *ConfigV1Client) *consoles { + return &consoles{ + gentype.NewClientWithListAndApply[*configv1.Console, *configv1.ConsoleList, *applyconfigurationsconfigv1.ConsoleApplyConfiguration]( + "consoles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Console { return &configv1.Console{} }, + func() *configv1.ConsoleList { return &configv1.ConsoleList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go new file mode 100644 index 0000000000000..76efd86104f92 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// DNSesGetter has a method to return a DNSInterface. +// A group's client should implement this interface. +type DNSesGetter interface { + DNSes() DNSInterface +} + +// DNSInterface has methods to work with DNS resources. +type DNSInterface interface { + Create(ctx context.Context, dNS *configv1.DNS, opts metav1.CreateOptions) (*configv1.DNS, error) + Update(ctx context.Context, dNS *configv1.DNS, opts metav1.UpdateOptions) (*configv1.DNS, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, dNS *configv1.DNS, opts metav1.UpdateOptions) (*configv1.DNS, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.DNS, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.DNSList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.DNS, err error) + Apply(ctx context.Context, dNS *applyconfigurationsconfigv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.DNS, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, dNS *applyconfigurationsconfigv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.DNS, err error) + DNSExpansion +} + +// dNSes implements DNSInterface +type dNSes struct { + *gentype.ClientWithListAndApply[*configv1.DNS, *configv1.DNSList, *applyconfigurationsconfigv1.DNSApplyConfiguration] +} + +// newDNSes returns a DNSes +func newDNSes(c *ConfigV1Client) *dNSes { + return &dNSes{ + gentype.NewClientWithListAndApply[*configv1.DNS, *configv1.DNSList, *applyconfigurationsconfigv1.DNSApplyConfiguration]( + "dnses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.DNS { return &configv1.DNS{} }, + func() *configv1.DNSList { return &configv1.DNSList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go new file mode 100644 index 0000000000000..2b5ba4c8e4422 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go new file mode 100644 index 0000000000000..577b67da93ded --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
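Editor's note: the fake wrappers that follow all delegate to one shared testing.Fake and ObjectTracker, so tests can also assert on stored state through Tracker() directly. A sketch; the DNS singleton name "cluster" is an assumption, and cluster-scoped resources live under the empty namespace in the tracker:

package example

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func inspectTracker() error {
	cs := fake.NewSimpleClientset(&configv1.DNS{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}})
	// Cluster-scoped resources are stored under the empty namespace.
	obj, err := cs.Tracker().Get(configv1.SchemeGroupVersion.WithResource("dnses"), "", "cluster")
	if err != nil {
		return err
	}
	fmt.Printf("tracker holds %T\n", obj)
	return nil
}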
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeAPIServers implements APIServerInterface +type fakeAPIServers struct { + *gentype.FakeClientWithListAndApply[*v1.APIServer, *v1.APIServerList, *configv1.APIServerApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeAPIServers(fake *FakeConfigV1) typedconfigv1.APIServerInterface { + return &fakeAPIServers{ + gentype.NewFakeClientWithListAndApply[*v1.APIServer, *v1.APIServerList, *configv1.APIServerApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("apiservers"), + v1.SchemeGroupVersion.WithKind("APIServer"), + func() *v1.APIServer { return &v1.APIServer{} }, + func() *v1.APIServerList { return &v1.APIServerList{} }, + func(dst, src *v1.APIServerList) { dst.ListMeta = src.ListMeta }, + func(list *v1.APIServerList) []*v1.APIServer { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.APIServerList, items []*v1.APIServer) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go new file mode 100644 index 0000000000000..f81b4414397b5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go @@ -0,0 +1,35 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeAuthentications implements AuthenticationInterface +type fakeAuthentications struct { + *gentype.FakeClientWithListAndApply[*v1.Authentication, *v1.AuthenticationList, *configv1.AuthenticationApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeAuthentications(fake *FakeConfigV1) typedconfigv1.AuthenticationInterface { + return &fakeAuthentications{ + gentype.NewFakeClientWithListAndApply[*v1.Authentication, *v1.AuthenticationList, *configv1.AuthenticationApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("authentications"), + v1.SchemeGroupVersion.WithKind("Authentication"), + func() *v1.Authentication { return &v1.Authentication{} }, + func() *v1.AuthenticationList { return &v1.AuthenticationList{} }, + func(dst, src *v1.AuthenticationList) { dst.ListMeta = src.ListMeta }, + func(list *v1.AuthenticationList) []*v1.Authentication { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.AuthenticationList, items []*v1.Authentication) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go new file mode 100644 index 0000000000000..590e57d8e2377 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeBuilds implements BuildInterface +type fakeBuilds struct { + *gentype.FakeClientWithListAndApply[*v1.Build, *v1.BuildList, *configv1.BuildApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeBuilds(fake *FakeConfigV1) typedconfigv1.BuildInterface { + return &fakeBuilds{ + gentype.NewFakeClientWithListAndApply[*v1.Build, *v1.BuildList, *configv1.BuildApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("builds"), + v1.SchemeGroupVersion.WithKind("Build"), + func() *v1.Build { return &v1.Build{} }, + func() *v1.BuildList { return &v1.BuildList{} }, + func(dst, src *v1.BuildList) { dst.ListMeta = src.ListMeta }, + func(list *v1.BuildList) []*v1.Build { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.BuildList, items []*v1.Build) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go new file mode 100644 index 0000000000000..ac5a30983dc56 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go @@ -0,0 +1,35 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeClusterOperators implements ClusterOperatorInterface +type fakeClusterOperators struct { + *gentype.FakeClientWithListAndApply[*v1.ClusterOperator, *v1.ClusterOperatorList, *configv1.ClusterOperatorApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeClusterOperators(fake *FakeConfigV1) typedconfigv1.ClusterOperatorInterface { + return &fakeClusterOperators{ + gentype.NewFakeClientWithListAndApply[*v1.ClusterOperator, *v1.ClusterOperatorList, *configv1.ClusterOperatorApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("clusteroperators"), + v1.SchemeGroupVersion.WithKind("ClusterOperator"), + func() *v1.ClusterOperator { return &v1.ClusterOperator{} }, + func() *v1.ClusterOperatorList { return &v1.ClusterOperatorList{} }, + func(dst, src *v1.ClusterOperatorList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ClusterOperatorList) []*v1.ClusterOperator { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ClusterOperatorList, items []*v1.ClusterOperator) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go new file mode 100644 index 0000000000000..6bb123d7f9432 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go @@ -0,0 +1,35 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeClusterVersions implements ClusterVersionInterface +type fakeClusterVersions struct { + *gentype.FakeClientWithListAndApply[*v1.ClusterVersion, *v1.ClusterVersionList, *configv1.ClusterVersionApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeClusterVersions(fake *FakeConfigV1) typedconfigv1.ClusterVersionInterface { + return &fakeClusterVersions{ + gentype.NewFakeClientWithListAndApply[*v1.ClusterVersion, *v1.ClusterVersionList, *configv1.ClusterVersionApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("clusterversions"), + v1.SchemeGroupVersion.WithKind("ClusterVersion"), + func() *v1.ClusterVersion { return &v1.ClusterVersion{} }, + func() *v1.ClusterVersionList { return &v1.ClusterVersionList{} }, + func(dst, src *v1.ClusterVersionList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ClusterVersionList) []*v1.ClusterVersion { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ClusterVersionList, items []*v1.ClusterVersion) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go new file mode 100644 index 0000000000000..6253194636228 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeConfigV1 struct { + *testing.Fake +} + +func (c *FakeConfigV1) APIServers() v1.APIServerInterface { + return newFakeAPIServers(c) +} + +func (c *FakeConfigV1) Authentications() v1.AuthenticationInterface { + return newFakeAuthentications(c) +} + +func (c *FakeConfigV1) Builds() v1.BuildInterface { + return newFakeBuilds(c) +} + +func (c *FakeConfigV1) ClusterOperators() v1.ClusterOperatorInterface { + return newFakeClusterOperators(c) +} + +func (c *FakeConfigV1) ClusterVersions() v1.ClusterVersionInterface { + return newFakeClusterVersions(c) +} + +func (c *FakeConfigV1) Consoles() v1.ConsoleInterface { + return newFakeConsoles(c) +} + +func (c *FakeConfigV1) DNSes() v1.DNSInterface { + return newFakeDNSes(c) +} + +func (c *FakeConfigV1) FeatureGates() v1.FeatureGateInterface { + return newFakeFeatureGates(c) +} + +func (c *FakeConfigV1) Images() v1.ImageInterface { + return newFakeImages(c) +} + +func (c *FakeConfigV1) ImageContentPolicies() v1.ImageContentPolicyInterface { + return newFakeImageContentPolicies(c) +} + +func (c *FakeConfigV1) ImageDigestMirrorSets() v1.ImageDigestMirrorSetInterface { + return newFakeImageDigestMirrorSets(c) +} + +func (c *FakeConfigV1) ImageTagMirrorSets() v1.ImageTagMirrorSetInterface { + return newFakeImageTagMirrorSets(c) +} + +func (c *FakeConfigV1) Infrastructures() v1.InfrastructureInterface { + return newFakeInfrastructures(c) +} + +func (c *FakeConfigV1) Ingresses() v1.IngressInterface { + return newFakeIngresses(c) +} + +func (c *FakeConfigV1) Networks() v1.NetworkInterface { + return newFakeNetworks(c) +} + +func (c *FakeConfigV1) Nodes() v1.NodeInterface { + return newFakeNodes(c) +} + +func (c *FakeConfigV1) OAuths() v1.OAuthInterface { + return newFakeOAuths(c) +} + +func (c *FakeConfigV1) OperatorHubs() v1.OperatorHubInterface { + return newFakeOperatorHubs(c) +} + +func (c *FakeConfigV1) Projects() v1.ProjectInterface { + return newFakeProjects(c) +} + +func (c *FakeConfigV1) Proxies() v1.ProxyInterface { + return newFakeProxies(c) +} + +func (c *FakeConfigV1) Schedulers() v1.SchedulerInterface { + return newFakeSchedulers(c) +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeConfigV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go new file mode 100644 index 0000000000000..1f1cbdd72668d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
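Editor's note: because FakeConfigV1 above routes every typed call through the embedded testing.Fake, failures are injected with reactors on the clientset rather than per-resource stubs. A hedged sketch:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"
)

func injectGetFailure() {
	cs := fake.NewSimpleClientset()
	// Reactors match in order; prepending wins over the default
	// ObjectReaction installed by NewSimpleClientset.
	cs.PrependReactor("get", "consoles", func(a k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("injected failure")
	})
	_, err := cs.ConfigV1().Consoles().Get(context.TODO(), "cluster", metav1.GetOptions{})
	fmt.Println(err) // prints "injected failure"
}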
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeConsoles implements ConsoleInterface +type fakeConsoles struct { + *gentype.FakeClientWithListAndApply[*v1.Console, *v1.ConsoleList, *configv1.ConsoleApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeConsoles(fake *FakeConfigV1) typedconfigv1.ConsoleInterface { + return &fakeConsoles{ + gentype.NewFakeClientWithListAndApply[*v1.Console, *v1.ConsoleList, *configv1.ConsoleApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("consoles"), + v1.SchemeGroupVersion.WithKind("Console"), + func() *v1.Console { return &v1.Console{} }, + func() *v1.ConsoleList { return &v1.ConsoleList{} }, + func(dst, src *v1.ConsoleList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ConsoleList) []*v1.Console { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ConsoleList, items []*v1.Console) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go new file mode 100644 index 0000000000000..5d998d6344363 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeDNSes implements DNSInterface +type fakeDNSes struct { + *gentype.FakeClientWithListAndApply[*v1.DNS, *v1.DNSList, *configv1.DNSApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeDNSes(fake *FakeConfigV1) typedconfigv1.DNSInterface { + return &fakeDNSes{ + gentype.NewFakeClientWithListAndApply[*v1.DNS, *v1.DNSList, *configv1.DNSApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("dnses"), + v1.SchemeGroupVersion.WithKind("DNS"), + func() *v1.DNS { return &v1.DNS{} }, + func() *v1.DNSList { return &v1.DNSList{} }, + func(dst, src *v1.DNSList) { dst.ListMeta = src.ListMeta }, + func(list *v1.DNSList) []*v1.DNS { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.DNSList, items []*v1.DNS) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go new file mode 100644 index 0000000000000..21dcf2e639af2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeFeatureGates implements FeatureGateInterface +type fakeFeatureGates struct { + *gentype.FakeClientWithListAndApply[*v1.FeatureGate, *v1.FeatureGateList, *configv1.FeatureGateApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeFeatureGates(fake *FakeConfigV1) typedconfigv1.FeatureGateInterface { + return &fakeFeatureGates{ + gentype.NewFakeClientWithListAndApply[*v1.FeatureGate, *v1.FeatureGateList, *configv1.FeatureGateApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("featuregates"), + v1.SchemeGroupVersion.WithKind("FeatureGate"), + func() *v1.FeatureGate { return &v1.FeatureGate{} }, + func() *v1.FeatureGateList { return &v1.FeatureGateList{} }, + func(dst, src *v1.FeatureGateList) { dst.ListMeta = src.ListMeta }, + func(list *v1.FeatureGateList) []*v1.FeatureGate { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.FeatureGateList, items []*v1.FeatureGate) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go new file mode 100644 index 0000000000000..107a96071e5ac --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeImages implements ImageInterface +type fakeImages struct { + *gentype.FakeClientWithListAndApply[*v1.Image, *v1.ImageList, *configv1.ImageApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeImages(fake *FakeConfigV1) typedconfigv1.ImageInterface { + return &fakeImages{ + gentype.NewFakeClientWithListAndApply[*v1.Image, *v1.ImageList, *configv1.ImageApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("images"), + v1.SchemeGroupVersion.WithKind("Image"), + func() *v1.Image { return &v1.Image{} }, + func() *v1.ImageList { return &v1.ImageList{} }, + func(dst, src *v1.ImageList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ImageList) []*v1.Image { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ImageList, items []*v1.Image) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go new file mode 100644 index 0000000000000..a0771c4229fa1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeImageContentPolicies implements ImageContentPolicyInterface +type fakeImageContentPolicies struct { + *gentype.FakeClientWithListAndApply[*v1.ImageContentPolicy, *v1.ImageContentPolicyList, *configv1.ImageContentPolicyApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeImageContentPolicies(fake *FakeConfigV1) typedconfigv1.ImageContentPolicyInterface { + return &fakeImageContentPolicies{ + gentype.NewFakeClientWithListAndApply[*v1.ImageContentPolicy, *v1.ImageContentPolicyList, *configv1.ImageContentPolicyApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("imagecontentpolicies"), + v1.SchemeGroupVersion.WithKind("ImageContentPolicy"), + func() *v1.ImageContentPolicy { return &v1.ImageContentPolicy{} }, + func() *v1.ImageContentPolicyList { return &v1.ImageContentPolicyList{} }, + func(dst, src *v1.ImageContentPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ImageContentPolicyList) []*v1.ImageContentPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ImageContentPolicyList, items []*v1.ImageContentPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go new file mode 100644 index 0000000000000..fda5bdcce2b56 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeImageDigestMirrorSets implements ImageDigestMirrorSetInterface +type fakeImageDigestMirrorSets struct { + *gentype.FakeClientWithListAndApply[*v1.ImageDigestMirrorSet, *v1.ImageDigestMirrorSetList, *configv1.ImageDigestMirrorSetApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeImageDigestMirrorSets(fake *FakeConfigV1) typedconfigv1.ImageDigestMirrorSetInterface { + return &fakeImageDigestMirrorSets{ + gentype.NewFakeClientWithListAndApply[*v1.ImageDigestMirrorSet, *v1.ImageDigestMirrorSetList, *configv1.ImageDigestMirrorSetApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("imagedigestmirrorsets"), + v1.SchemeGroupVersion.WithKind("ImageDigestMirrorSet"), + func() *v1.ImageDigestMirrorSet { return &v1.ImageDigestMirrorSet{} }, + func() *v1.ImageDigestMirrorSetList { return &v1.ImageDigestMirrorSetList{} }, + func(dst, src *v1.ImageDigestMirrorSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ImageDigestMirrorSetList) []*v1.ImageDigestMirrorSet { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ImageDigestMirrorSetList, items []*v1.ImageDigestMirrorSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go new file mode 100644 index 0000000000000..cea42e0448372 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeImageTagMirrorSets implements ImageTagMirrorSetInterface +type fakeImageTagMirrorSets struct { + *gentype.FakeClientWithListAndApply[*v1.ImageTagMirrorSet, *v1.ImageTagMirrorSetList, *configv1.ImageTagMirrorSetApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeImageTagMirrorSets(fake *FakeConfigV1) typedconfigv1.ImageTagMirrorSetInterface { + return &fakeImageTagMirrorSets{ + gentype.NewFakeClientWithListAndApply[*v1.ImageTagMirrorSet, *v1.ImageTagMirrorSetList, *configv1.ImageTagMirrorSetApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("imagetagmirrorsets"), + v1.SchemeGroupVersion.WithKind("ImageTagMirrorSet"), + func() *v1.ImageTagMirrorSet { return &v1.ImageTagMirrorSet{} }, + func() *v1.ImageTagMirrorSetList { return &v1.ImageTagMirrorSetList{} }, + func(dst, src *v1.ImageTagMirrorSetList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ImageTagMirrorSetList) []*v1.ImageTagMirrorSet { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.ImageTagMirrorSetList, items []*v1.ImageTagMirrorSet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go new file mode 100644 index 0000000000000..b859d6798857b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go @@ -0,0 +1,35 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeInfrastructures implements InfrastructureInterface +type fakeInfrastructures struct { + *gentype.FakeClientWithListAndApply[*v1.Infrastructure, *v1.InfrastructureList, *configv1.InfrastructureApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeInfrastructures(fake *FakeConfigV1) typedconfigv1.InfrastructureInterface { + return &fakeInfrastructures{ + gentype.NewFakeClientWithListAndApply[*v1.Infrastructure, *v1.InfrastructureList, *configv1.InfrastructureApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("infrastructures"), + v1.SchemeGroupVersion.WithKind("Infrastructure"), + func() *v1.Infrastructure { return &v1.Infrastructure{} }, + func() *v1.InfrastructureList { return &v1.InfrastructureList{} }, + func(dst, src *v1.InfrastructureList) { dst.ListMeta = src.ListMeta }, + func(list *v1.InfrastructureList) []*v1.Infrastructure { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.InfrastructureList, items []*v1.Infrastructure) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go new file mode 100644 index 0000000000000..5f7d16cda1d5e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeIngresses implements IngressInterface +type fakeIngresses struct { + *gentype.FakeClientWithListAndApply[*v1.Ingress, *v1.IngressList, *configv1.IngressApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeIngresses(fake *FakeConfigV1) typedconfigv1.IngressInterface { + return &fakeIngresses{ + gentype.NewFakeClientWithListAndApply[*v1.Ingress, *v1.IngressList, *configv1.IngressApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("ingresses"), + v1.SchemeGroupVersion.WithKind("Ingress"), + func() *v1.Ingress { return &v1.Ingress{} }, + func() *v1.IngressList { return &v1.IngressList{} }, + func(dst, src *v1.IngressList) { dst.ListMeta = src.ListMeta }, + func(list *v1.IngressList) []*v1.Ingress { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.IngressList, items []*v1.Ingress) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go new file mode 100644 index 0000000000000..446ce65db95d5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
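Because each generated fake delegates to the shared testing.Fake embedded in the clientset, reactors registered there intercept these methods as well. A sketch of injecting a failure for the Ingress fake above, again assuming fake.NewSimpleClientset; the test body is illustrative:

package example

import (
	"context"
	"errors"
	"testing"

	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"
)

func TestIngressGetFailure(t *testing.T) {
	client := fakeconfig.NewSimpleClientset()

	// Reactors run before the object tracker, which makes it easy to
	// simulate server-side failures for a specific verb and resource.
	client.PrependReactor("get", "ingresses",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("simulated outage")
		})

	_, err := client.ConfigV1().Ingresses().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err == nil {
		t.Fatal("expected the injected error, got nil")
	}
}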
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeNetworks implements NetworkInterface +type fakeNetworks struct { + *gentype.FakeClientWithListAndApply[*v1.Network, *v1.NetworkList, *configv1.NetworkApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeNetworks(fake *FakeConfigV1) typedconfigv1.NetworkInterface { + return &fakeNetworks{ + gentype.NewFakeClientWithListAndApply[*v1.Network, *v1.NetworkList, *configv1.NetworkApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("networks"), + v1.SchemeGroupVersion.WithKind("Network"), + func() *v1.Network { return &v1.Network{} }, + func() *v1.NetworkList { return &v1.NetworkList{} }, + func(dst, src *v1.NetworkList) { dst.ListMeta = src.ListMeta }, + func(list *v1.NetworkList) []*v1.Network { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.NetworkList, items []*v1.Network) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go new file mode 100644 index 0000000000000..77aa20d2a9ce0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeNodes implements NodeInterface +type fakeNodes struct { + *gentype.FakeClientWithListAndApply[*v1.Node, *v1.NodeList, *configv1.NodeApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeNodes(fake *FakeConfigV1) typedconfigv1.NodeInterface { + return &fakeNodes{ + gentype.NewFakeClientWithListAndApply[*v1.Node, *v1.NodeList, *configv1.NodeApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("nodes"), + v1.SchemeGroupVersion.WithKind("Node"), + func() *v1.Node { return &v1.Node{} }, + func() *v1.NodeList { return &v1.NodeList{} }, + func(dst, src *v1.NodeList) { dst.ListMeta = src.ListMeta }, + func(list *v1.NodeList) []*v1.Node { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.NodeList, items []*v1.Node) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go new file mode 100644 index 0000000000000..f4ab053b94e41 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeOAuths implements OAuthInterface +type fakeOAuths struct { + *gentype.FakeClientWithListAndApply[*v1.OAuth, *v1.OAuthList, *configv1.OAuthApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeOAuths(fake *FakeConfigV1) typedconfigv1.OAuthInterface { + return &fakeOAuths{ + gentype.NewFakeClientWithListAndApply[*v1.OAuth, *v1.OAuthList, *configv1.OAuthApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("oauths"), + v1.SchemeGroupVersion.WithKind("OAuth"), + func() *v1.OAuth { return &v1.OAuth{} }, + func() *v1.OAuthList { return &v1.OAuthList{} }, + func(dst, src *v1.OAuthList) { dst.ListMeta = src.ListMeta }, + func(list *v1.OAuthList) []*v1.OAuth { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.OAuthList, items []*v1.OAuth) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go new file mode 100644 index 0000000000000..06989ba1d8185 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeOperatorHubs implements OperatorHubInterface +type fakeOperatorHubs struct { + *gentype.FakeClientWithListAndApply[*v1.OperatorHub, *v1.OperatorHubList, *configv1.OperatorHubApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeOperatorHubs(fake *FakeConfigV1) typedconfigv1.OperatorHubInterface { + return &fakeOperatorHubs{ + gentype.NewFakeClientWithListAndApply[*v1.OperatorHub, *v1.OperatorHubList, *configv1.OperatorHubApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("operatorhubs"), + v1.SchemeGroupVersion.WithKind("OperatorHub"), + func() *v1.OperatorHub { return &v1.OperatorHub{} }, + func() *v1.OperatorHubList { return &v1.OperatorHubList{} }, + func(dst, src *v1.OperatorHubList) { dst.ListMeta = src.ListMeta }, + func(list *v1.OperatorHubList) []*v1.OperatorHub { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.OperatorHubList, items []*v1.OperatorHub) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go new file mode 100644 index 0000000000000..506f1527b873b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeProjects implements ProjectInterface +type fakeProjects struct { + *gentype.FakeClientWithListAndApply[*v1.Project, *v1.ProjectList, *configv1.ProjectApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeProjects(fake *FakeConfigV1) typedconfigv1.ProjectInterface { + return &fakeProjects{ + gentype.NewFakeClientWithListAndApply[*v1.Project, *v1.ProjectList, *configv1.ProjectApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("projects"), + v1.SchemeGroupVersion.WithKind("Project"), + func() *v1.Project { return &v1.Project{} }, + func() *v1.ProjectList { return &v1.ProjectList{} }, + func(dst, src *v1.ProjectList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ProjectList) []*v1.Project { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ProjectList, items []*v1.Project) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go new file mode 100644 index 0000000000000..6cb1cd0946b3d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeProxies implements ProxyInterface +type fakeProxies struct { + *gentype.FakeClientWithListAndApply[*v1.Proxy, *v1.ProxyList, *configv1.ProxyApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeProxies(fake *FakeConfigV1) typedconfigv1.ProxyInterface { + return &fakeProxies{ + gentype.NewFakeClientWithListAndApply[*v1.Proxy, *v1.ProxyList, *configv1.ProxyApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("proxies"), + v1.SchemeGroupVersion.WithKind("Proxy"), + func() *v1.Proxy { return &v1.Proxy{} }, + func() *v1.ProxyList { return &v1.ProxyList{} }, + func(dst, src *v1.ProxyList) { dst.ListMeta = src.ListMeta }, + func(list *v1.ProxyList) []*v1.Proxy { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.ProxyList, items []*v1.Proxy) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go new file mode 100644 index 0000000000000..cba9733c94d56 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeSchedulers implements SchedulerInterface +type fakeSchedulers struct { + *gentype.FakeClientWithListAndApply[*v1.Scheduler, *v1.SchedulerList, *configv1.SchedulerApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeSchedulers(fake *FakeConfigV1) typedconfigv1.SchedulerInterface { + return &fakeSchedulers{ + gentype.NewFakeClientWithListAndApply[*v1.Scheduler, *v1.SchedulerList, *configv1.SchedulerApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("schedulers"), + v1.SchemeGroupVersion.WithKind("Scheduler"), + func() *v1.Scheduler { return &v1.Scheduler{} }, + func() *v1.SchedulerList { return &v1.SchedulerList{} }, + func(dst, src *v1.SchedulerList) { dst.ListMeta = src.ListMeta }, + func(list *v1.SchedulerList) []*v1.Scheduler { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.SchedulerList, items []*v1.Scheduler) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go new file mode 100644 index 0000000000000..2a41c2e738386 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// FeatureGatesGetter has a method to return a FeatureGateInterface. +// A group's client should implement this interface. +type FeatureGatesGetter interface { + FeatureGates() FeatureGateInterface +} + +// FeatureGateInterface has methods to work with FeatureGate resources. +type FeatureGateInterface interface { + Create(ctx context.Context, featureGate *configv1.FeatureGate, opts metav1.CreateOptions) (*configv1.FeatureGate, error) + Update(ctx context.Context, featureGate *configv1.FeatureGate, opts metav1.UpdateOptions) (*configv1.FeatureGate, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, featureGate *configv1.FeatureGate, opts metav1.UpdateOptions) (*configv1.FeatureGate, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.FeatureGate, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.FeatureGateList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.FeatureGate, err error) + Apply(ctx context.Context, featureGate *applyconfigurationsconfigv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.FeatureGate, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, featureGate *applyconfigurationsconfigv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.FeatureGate, err error) + FeatureGateExpansion +} + +// featureGates implements FeatureGateInterface +type featureGates struct { + *gentype.ClientWithListAndApply[*configv1.FeatureGate, *configv1.FeatureGateList, *applyconfigurationsconfigv1.FeatureGateApplyConfiguration] +} + +// newFeatureGates returns a FeatureGates +func newFeatureGates(c *ConfigV1Client) *featureGates { + return &featureGates{ + gentype.NewClientWithListAndApply[*configv1.FeatureGate, *configv1.FeatureGateList, *applyconfigurationsconfigv1.FeatureGateApplyConfiguration]( + "featuregates", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.FeatureGate { return &configv1.FeatureGate{} }, + func() *configv1.FeatureGateList { return &configv1.FeatureGateList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go new file mode 100644 index 0000000000000..a56721ba9d349 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -0,0 +1,45 @@ +// Code generated by client-gen. DO NOT EDIT. 
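The non-fake typed clients in this part of the diff are thin wrappers over gentype.ClientWithListAndApply bound to a REST client. A usage sketch for the FeatureGate client above, assuming NewForConfig from the versioned clientset package one level up (the function and variable names here are illustrative):

package example

import (
	"context"
	"fmt"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func printFeatureGate(cfg *rest.Config) error {
	client, err := configclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// FeatureGate is cluster-scoped (note the empty namespace argument in the
	// generated constructor); the singleton is conventionally named "cluster".
	fg, err := client.ConfigV1().FeatureGates().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println(fg.Name)
	return nil
}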
+ +package v1 + +type APIServerExpansion interface{} + +type AuthenticationExpansion interface{} + +type BuildExpansion interface{} + +type ClusterOperatorExpansion interface{} + +type ClusterVersionExpansion interface{} + +type ConsoleExpansion interface{} + +type DNSExpansion interface{} + +type FeatureGateExpansion interface{} + +type ImageExpansion interface{} + +type ImageContentPolicyExpansion interface{} + +type ImageDigestMirrorSetExpansion interface{} + +type ImageTagMirrorSetExpansion interface{} + +type InfrastructureExpansion interface{} + +type IngressExpansion interface{} + +type NetworkExpansion interface{} + +type NodeExpansion interface{} + +type OAuthExpansion interface{} + +type OperatorHubExpansion interface{} + +type ProjectExpansion interface{} + +type ProxyExpansion interface{} + +type SchedulerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go new file mode 100644 index 0000000000000..2950a19c60169 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImagesGetter has a method to return a ImageInterface. +// A group's client should implement this interface. +type ImagesGetter interface { + Images() ImageInterface +} + +// ImageInterface has methods to work with Image resources. +type ImageInterface interface { + Create(ctx context.Context, image *configv1.Image, opts metav1.CreateOptions) (*configv1.Image, error) + Update(ctx context.Context, image *configv1.Image, opts metav1.UpdateOptions) (*configv1.Image, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, image *configv1.Image, opts metav1.UpdateOptions) (*configv1.Image, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Image, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Image, err error) + Apply(ctx context.Context, image *applyconfigurationsconfigv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Image, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, image *applyconfigurationsconfigv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Image, err error) + ImageExpansion +} + +// images implements ImageInterface +type images struct { + *gentype.ClientWithListAndApply[*configv1.Image, *configv1.ImageList, *applyconfigurationsconfigv1.ImageApplyConfiguration] +} + +// newImages returns a Images +func newImages(c *ConfigV1Client) *images { + return &images{ + gentype.NewClientWithListAndApply[*configv1.Image, *configv1.ImageList, *applyconfigurationsconfigv1.ImageApplyConfiguration]( + "images", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Image { return &configv1.Image{} }, + func() *configv1.ImageList { return &configv1.ImageList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go new file mode 100644 index 0000000000000..ce52d6c813b89 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImageContentPoliciesGetter has a method to return a ImageContentPolicyInterface. +// A group's client should implement this interface. +type ImageContentPoliciesGetter interface { + ImageContentPolicies() ImageContentPolicyInterface +} + +// ImageContentPolicyInterface has methods to work with ImageContentPolicy resources. 
+type ImageContentPolicyInterface interface { + Create(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts metav1.CreateOptions) (*configv1.ImageContentPolicy, error) + Update(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts metav1.UpdateOptions) (*configv1.ImageContentPolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImageContentPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageContentPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImageContentPolicy, err error) + Apply(ctx context.Context, imageContentPolicy *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageContentPolicy, err error) + ImageContentPolicyExpansion +} + +// imageContentPolicies implements ImageContentPolicyInterface +type imageContentPolicies struct { + *gentype.ClientWithListAndApply[*configv1.ImageContentPolicy, *configv1.ImageContentPolicyList, *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration] +} + +// newImageContentPolicies returns a ImageContentPolicies +func newImageContentPolicies(c *ConfigV1Client) *imageContentPolicies { + return &imageContentPolicies{ + gentype.NewClientWithListAndApply[*configv1.ImageContentPolicy, *configv1.ImageContentPolicyList, *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration]( + "imagecontentpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.ImageContentPolicy { return &configv1.ImageContentPolicy{} }, + func() *configv1.ImageContentPolicyList { return &configv1.ImageContentPolicyList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go new file mode 100644 index 0000000000000..70018dd7fc1c1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImageDigestMirrorSetsGetter has a method to return a ImageDigestMirrorSetInterface. +// A group's client should implement this interface. +type ImageDigestMirrorSetsGetter interface { + ImageDigestMirrorSets() ImageDigestMirrorSetInterface +} + +// ImageDigestMirrorSetInterface has methods to work with ImageDigestMirrorSet resources. 
+type ImageDigestMirrorSetInterface interface { + Create(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts metav1.CreateOptions) (*configv1.ImageDigestMirrorSet, error) + Update(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageDigestMirrorSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageDigestMirrorSet, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImageDigestMirrorSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageDigestMirrorSetList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImageDigestMirrorSet, err error) + Apply(ctx context.Context, imageDigestMirrorSet *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageDigestMirrorSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, imageDigestMirrorSet *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageDigestMirrorSet, err error) + ImageDigestMirrorSetExpansion +} + +// imageDigestMirrorSets implements ImageDigestMirrorSetInterface +type imageDigestMirrorSets struct { + *gentype.ClientWithListAndApply[*configv1.ImageDigestMirrorSet, *configv1.ImageDigestMirrorSetList, *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration] +} + +// newImageDigestMirrorSets returns a ImageDigestMirrorSets +func newImageDigestMirrorSets(c *ConfigV1Client) *imageDigestMirrorSets { + return &imageDigestMirrorSets{ + gentype.NewClientWithListAndApply[*configv1.ImageDigestMirrorSet, *configv1.ImageDigestMirrorSetList, *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration]( + "imagedigestmirrorsets", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.ImageDigestMirrorSet { return &configv1.ImageDigestMirrorSet{} }, + func() *configv1.ImageDigestMirrorSetList { return &configv1.ImageDigestMirrorSetList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go new file mode 100644 index 0000000000000..ca3c6e0be7251 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. 
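The Apply and ApplyStatus methods on these interfaces perform server-side apply driven by the apply configurations imported at the top of each file. A sketch for ImageDigestMirrorSet, assuming the generated cluster-scoped constructor applyconfigv1.ImageDigestMirrorSet(name); the field-manager name is illustrative:

package example

import (
	"context"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func applyMirrorSet(cfg *rest.Config) error {
	client, err := configclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Only the fields set on the apply configuration are owned by this field
	// manager; fields left unset remain under other managers' control.
	desired := applyconfigv1.ImageDigestMirrorSet("example")
	_, err = client.ConfigV1().ImageDigestMirrorSets().Apply(
		context.TODO(), desired, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}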
+ +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImageTagMirrorSetsGetter has a method to return a ImageTagMirrorSetInterface. +// A group's client should implement this interface. +type ImageTagMirrorSetsGetter interface { + ImageTagMirrorSets() ImageTagMirrorSetInterface +} + +// ImageTagMirrorSetInterface has methods to work with ImageTagMirrorSet resources. +type ImageTagMirrorSetInterface interface { + Create(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts metav1.CreateOptions) (*configv1.ImageTagMirrorSet, error) + Update(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageTagMirrorSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*configv1.ImageTagMirrorSet, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImageTagMirrorSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImageTagMirrorSetList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImageTagMirrorSet, err error) + Apply(ctx context.Context, imageTagMirrorSet *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageTagMirrorSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, imageTagMirrorSet *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImageTagMirrorSet, err error) + ImageTagMirrorSetExpansion +} + +// imageTagMirrorSets implements ImageTagMirrorSetInterface +type imageTagMirrorSets struct { + *gentype.ClientWithListAndApply[*configv1.ImageTagMirrorSet, *configv1.ImageTagMirrorSetList, *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration] +} + +// newImageTagMirrorSets returns a ImageTagMirrorSets +func newImageTagMirrorSets(c *ConfigV1Client) *imageTagMirrorSets { + return &imageTagMirrorSets{ + gentype.NewClientWithListAndApply[*configv1.ImageTagMirrorSet, *configv1.ImageTagMirrorSetList, *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration]( + "imagetagmirrorsets", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.ImageTagMirrorSet { return &configv1.ImageTagMirrorSet{} }, + func() *configv1.ImageTagMirrorSetList { return &configv1.ImageTagMirrorSetList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go new file mode 100644 index 0000000000000..eb307026ce993 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// InfrastructuresGetter has a method to return a InfrastructureInterface. +// A group's client should implement this interface. +type InfrastructuresGetter interface { + Infrastructures() InfrastructureInterface +} + +// InfrastructureInterface has methods to work with Infrastructure resources. +type InfrastructureInterface interface { + Create(ctx context.Context, infrastructure *configv1.Infrastructure, opts metav1.CreateOptions) (*configv1.Infrastructure, error) + Update(ctx context.Context, infrastructure *configv1.Infrastructure, opts metav1.UpdateOptions) (*configv1.Infrastructure, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, infrastructure *configv1.Infrastructure, opts metav1.UpdateOptions) (*configv1.Infrastructure, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Infrastructure, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.InfrastructureList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Infrastructure, err error) + Apply(ctx context.Context, infrastructure *applyconfigurationsconfigv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Infrastructure, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, infrastructure *applyconfigurationsconfigv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Infrastructure, err error) + InfrastructureExpansion +} + +// infrastructures implements InfrastructureInterface +type infrastructures struct { + *gentype.ClientWithListAndApply[*configv1.Infrastructure, *configv1.InfrastructureList, *applyconfigurationsconfigv1.InfrastructureApplyConfiguration] +} + +// newInfrastructures returns a Infrastructures +func newInfrastructures(c *ConfigV1Client) *infrastructures { + return &infrastructures{ + gentype.NewClientWithListAndApply[*configv1.Infrastructure, *configv1.InfrastructureList, *applyconfigurationsconfigv1.InfrastructureApplyConfiguration]( + "infrastructures", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Infrastructure { return &configv1.Infrastructure{} }, + func() *configv1.InfrastructureList { return &configv1.InfrastructureList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go new file mode 100644 index 0000000000000..81057042d2d78 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses() IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. +type IngressInterface interface { + Create(ctx context.Context, ingress *configv1.Ingress, opts metav1.CreateOptions) (*configv1.Ingress, error) + Update(ctx context.Context, ingress *configv1.Ingress, opts metav1.UpdateOptions) (*configv1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ingress *configv1.Ingress, opts metav1.UpdateOptions) (*configv1.Ingress, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Ingress, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.IngressList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsconfigv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, ingress *applyconfigurationsconfigv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + *gentype.ClientWithListAndApply[*configv1.Ingress, *configv1.IngressList, *applyconfigurationsconfigv1.IngressApplyConfiguration] +} + +// newIngresses returns a Ingresses +func newIngresses(c *ConfigV1Client) *ingresses { + return &ingresses{ + gentype.NewClientWithListAndApply[*configv1.Ingress, *configv1.IngressList, *applyconfigurationsconfigv1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Ingress { return &configv1.Ingress{} }, + func() *configv1.IngressList { return &configv1.IngressList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go new file mode 100644 index 0000000000000..c58e0f211eb5d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// NetworksGetter has a method to return a NetworkInterface. +// A group's client should implement this interface. +type NetworksGetter interface { + Networks() NetworkInterface +} + +// NetworkInterface has methods to work with Network resources. +type NetworkInterface interface { + Create(ctx context.Context, network *configv1.Network, opts metav1.CreateOptions) (*configv1.Network, error) + Update(ctx context.Context, network *configv1.Network, opts metav1.UpdateOptions) (*configv1.Network, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, network *configv1.Network, opts metav1.UpdateOptions) (*configv1.Network, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Network, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.NetworkList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Network, err error) + Apply(ctx context.Context, network *applyconfigurationsconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Network, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, network *applyconfigurationsconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Network, err error) + NetworkExpansion +} + +// networks implements NetworkInterface +type networks struct { + *gentype.ClientWithListAndApply[*configv1.Network, *configv1.NetworkList, *applyconfigurationsconfigv1.NetworkApplyConfiguration] +} + +// newNetworks returns a Networks +func newNetworks(c *ConfigV1Client) *networks { + return &networks{ + gentype.NewClientWithListAndApply[*configv1.Network, *configv1.NetworkList, *applyconfigurationsconfigv1.NetworkApplyConfiguration]( + "networks", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Network { return &configv1.Network{} }, + func() *configv1.NetworkList { return &configv1.NetworkList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go new file mode 100644 index 0000000000000..b573b1598b1d7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. +type NodeInterface interface { + Create(ctx context.Context, node *configv1.Node, opts metav1.CreateOptions) (*configv1.Node, error) + Update(ctx context.Context, node *configv1.Node, opts metav1.UpdateOptions) (*configv1.Node, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, node *configv1.Node, opts metav1.UpdateOptions) (*configv1.Node, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Node, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.NodeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Node, err error) + Apply(ctx context.Context, node *applyconfigurationsconfigv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Node, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, node *applyconfigurationsconfigv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Node, err error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + *gentype.ClientWithListAndApply[*configv1.Node, *configv1.NodeList, *applyconfigurationsconfigv1.NodeApplyConfiguration] +} + +// newNodes returns a Nodes +func newNodes(c *ConfigV1Client) *nodes { + return &nodes{ + gentype.NewClientWithListAndApply[*configv1.Node, *configv1.NodeList, *applyconfigurationsconfigv1.NodeApplyConfiguration]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Node { return &configv1.Node{} }, + func() *configv1.NodeList { return &configv1.NodeList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go new file mode 100644 index 0000000000000..755a938737225 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OAuthsGetter has a method to return a OAuthInterface. +// A group's client should implement this interface. +type OAuthsGetter interface { + OAuths() OAuthInterface +} + +// OAuthInterface has methods to work with OAuth resources. +type OAuthInterface interface { + Create(ctx context.Context, oAuth *configv1.OAuth, opts metav1.CreateOptions) (*configv1.OAuth, error) + Update(ctx context.Context, oAuth *configv1.OAuth, opts metav1.UpdateOptions) (*configv1.OAuth, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, oAuth *configv1.OAuth, opts metav1.UpdateOptions) (*configv1.OAuth, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.OAuth, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.OAuthList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.OAuth, err error) + Apply(ctx context.Context, oAuth *applyconfigurationsconfigv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OAuth, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, oAuth *applyconfigurationsconfigv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OAuth, err error) + OAuthExpansion +} + +// oAuths implements OAuthInterface +type oAuths struct { + *gentype.ClientWithListAndApply[*configv1.OAuth, *configv1.OAuthList, *applyconfigurationsconfigv1.OAuthApplyConfiguration] +} + +// newOAuths returns a OAuths +func newOAuths(c *ConfigV1Client) *oAuths { + return &oAuths{ + gentype.NewClientWithListAndApply[*configv1.OAuth, *configv1.OAuthList, *applyconfigurationsconfigv1.OAuthApplyConfiguration]( + "oauths", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.OAuth { return &configv1.OAuth{} }, + func() *configv1.OAuthList { return &configv1.OAuthList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go new file mode 100644 index 0000000000000..e3ba1b8abe511 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OperatorHubsGetter has a method to return a OperatorHubInterface. +// A group's client should implement this interface. +type OperatorHubsGetter interface { + OperatorHubs() OperatorHubInterface +} + +// OperatorHubInterface has methods to work with OperatorHub resources. +type OperatorHubInterface interface { + Create(ctx context.Context, operatorHub *configv1.OperatorHub, opts metav1.CreateOptions) (*configv1.OperatorHub, error) + Update(ctx context.Context, operatorHub *configv1.OperatorHub, opts metav1.UpdateOptions) (*configv1.OperatorHub, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, operatorHub *configv1.OperatorHub, opts metav1.UpdateOptions) (*configv1.OperatorHub, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.OperatorHub, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.OperatorHubList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.OperatorHub, err error) + Apply(ctx context.Context, operatorHub *applyconfigurationsconfigv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OperatorHub, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, operatorHub *applyconfigurationsconfigv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.OperatorHub, err error) + OperatorHubExpansion +} + +// operatorHubs implements OperatorHubInterface +type operatorHubs struct { + *gentype.ClientWithListAndApply[*configv1.OperatorHub, *configv1.OperatorHubList, *applyconfigurationsconfigv1.OperatorHubApplyConfiguration] +} + +// newOperatorHubs returns a OperatorHubs +func newOperatorHubs(c *ConfigV1Client) *operatorHubs { + return &operatorHubs{ + gentype.NewClientWithListAndApply[*configv1.OperatorHub, *configv1.OperatorHubList, *applyconfigurationsconfigv1.OperatorHubApplyConfiguration]( + "operatorhubs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.OperatorHub { return &configv1.OperatorHub{} }, + func() *configv1.OperatorHubList { return &configv1.OperatorHubList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go new file mode 100644 index 0000000000000..5cde353a6add3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ProjectsGetter has a method to return a ProjectInterface. +// A group's client should implement this interface. +type ProjectsGetter interface { + Projects() ProjectInterface +} + +// ProjectInterface has methods to work with Project resources. +type ProjectInterface interface { + Create(ctx context.Context, project *configv1.Project, opts metav1.CreateOptions) (*configv1.Project, error) + Update(ctx context.Context, project *configv1.Project, opts metav1.UpdateOptions) (*configv1.Project, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, project *configv1.Project, opts metav1.UpdateOptions) (*configv1.Project, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Project, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ProjectList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Project, err error) + Apply(ctx context.Context, project *applyconfigurationsconfigv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Project, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, project *applyconfigurationsconfigv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Project, err error) + ProjectExpansion +} + +// projects implements ProjectInterface +type projects struct { + *gentype.ClientWithListAndApply[*configv1.Project, *configv1.ProjectList, *applyconfigurationsconfigv1.ProjectApplyConfiguration] +} + +// newProjects returns a Projects +func newProjects(c *ConfigV1Client) *projects { + return &projects{ + gentype.NewClientWithListAndApply[*configv1.Project, *configv1.ProjectList, *applyconfigurationsconfigv1.ProjectApplyConfiguration]( + "projects", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Project { return &configv1.Project{} }, + func() *configv1.ProjectList { return &configv1.ProjectList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go new file mode 100644 index 0000000000000..55374ecfe8052 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ProxiesGetter has a method to return a ProxyInterface. +// A group's client should implement this interface. +type ProxiesGetter interface { + Proxies() ProxyInterface +} + +// ProxyInterface has methods to work with Proxy resources. +type ProxyInterface interface { + Create(ctx context.Context, proxy *configv1.Proxy, opts metav1.CreateOptions) (*configv1.Proxy, error) + Update(ctx context.Context, proxy *configv1.Proxy, opts metav1.UpdateOptions) (*configv1.Proxy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, proxy *configv1.Proxy, opts metav1.UpdateOptions) (*configv1.Proxy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Proxy, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ProxyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Proxy, err error) + Apply(ctx context.Context, proxy *applyconfigurationsconfigv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Proxy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, proxy *applyconfigurationsconfigv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Proxy, err error) + ProxyExpansion +} + +// proxies implements ProxyInterface +type proxies struct { + *gentype.ClientWithListAndApply[*configv1.Proxy, *configv1.ProxyList, *applyconfigurationsconfigv1.ProxyApplyConfiguration] +} + +// newProxies returns a Proxies +func newProxies(c *ConfigV1Client) *proxies { + return &proxies{ + gentype.NewClientWithListAndApply[*configv1.Proxy, *configv1.ProxyList, *applyconfigurationsconfigv1.ProxyApplyConfiguration]( + "proxies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Proxy { return &configv1.Proxy{} }, + func() *configv1.ProxyList { return &configv1.ProxyList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go new file mode 100644 index 0000000000000..3bdc27dbc4ff0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// SchedulersGetter has a method to return a SchedulerInterface. +// A group's client should implement this interface. +type SchedulersGetter interface { + Schedulers() SchedulerInterface +} + +// SchedulerInterface has methods to work with Scheduler resources. +type SchedulerInterface interface { + Create(ctx context.Context, scheduler *configv1.Scheduler, opts metav1.CreateOptions) (*configv1.Scheduler, error) + Update(ctx context.Context, scheduler *configv1.Scheduler, opts metav1.UpdateOptions) (*configv1.Scheduler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, scheduler *configv1.Scheduler, opts metav1.UpdateOptions) (*configv1.Scheduler, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.Scheduler, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.SchedulerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.Scheduler, err error) + Apply(ctx context.Context, scheduler *applyconfigurationsconfigv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Scheduler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, scheduler *applyconfigurationsconfigv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.Scheduler, err error) + SchedulerExpansion +} + +// schedulers implements SchedulerInterface +type schedulers struct { + *gentype.ClientWithListAndApply[*configv1.Scheduler, *configv1.SchedulerList, *applyconfigurationsconfigv1.SchedulerApplyConfiguration] +} + +// newSchedulers returns a Schedulers +func newSchedulers(c *ConfigV1Client) *schedulers { + return &schedulers{ + gentype.NewClientWithListAndApply[*configv1.Scheduler, *configv1.SchedulerList, *applyconfigurationsconfigv1.SchedulerApplyConfiguration]( + "schedulers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.Scheduler { return &configv1.Scheduler{} }, + func() *configv1.SchedulerList { return &configv1.SchedulerList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go new file mode 100644 index 0000000000000..89c7b176e0da2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// BackupsGetter has a method to return a BackupInterface. +// A group's client should implement this interface. +type BackupsGetter interface { + Backups() BackupInterface +} + +// BackupInterface has methods to work with Backup resources. +type BackupInterface interface { + Create(ctx context.Context, backup *configv1alpha1.Backup, opts v1.CreateOptions) (*configv1alpha1.Backup, error) + Update(ctx context.Context, backup *configv1alpha1.Backup, opts v1.UpdateOptions) (*configv1alpha1.Backup, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, backup *configv1alpha1.Backup, opts v1.UpdateOptions) (*configv1alpha1.Backup, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.Backup, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.BackupList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.Backup, err error) + Apply(ctx context.Context, backup *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.Backup, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, backup *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.Backup, err error) + BackupExpansion +} + +// backups implements BackupInterface +type backups struct { + *gentype.ClientWithListAndApply[*configv1alpha1.Backup, *configv1alpha1.BackupList, *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration] +} + +// newBackups returns a Backups +func newBackups(c *ConfigV1alpha1Client) *backups { + return &backups{ + gentype.NewClientWithListAndApply[*configv1alpha1.Backup, *configv1alpha1.BackupList, *applyconfigurationsconfigv1alpha1.BackupApplyConfiguration]( + "backups", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1alpha1.Backup { return &configv1alpha1.Backup{} }, + func() *configv1alpha1.BackupList { return &configv1alpha1.BackupList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go new file mode 100644 index 0000000000000..8391f7b40eb3b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterImagePoliciesGetter has a method to return a ClusterImagePolicyInterface. +// A group's client should implement this interface. +type ClusterImagePoliciesGetter interface { + ClusterImagePolicies() ClusterImagePolicyInterface +} + +// ClusterImagePolicyInterface has methods to work with ClusterImagePolicy resources. +type ClusterImagePolicyInterface interface { + Create(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicy, opts v1.CreateOptions) (*configv1alpha1.ClusterImagePolicy, error) + Update(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ClusterImagePolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ClusterImagePolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.ClusterImagePolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.ClusterImagePolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.ClusterImagePolicy, err error) + Apply(ctx context.Context, clusterImagePolicy *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ClusterImagePolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterImagePolicy *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ClusterImagePolicy, err error) + ClusterImagePolicyExpansion +} + +// clusterImagePolicies implements ClusterImagePolicyInterface +type clusterImagePolicies struct { + *gentype.ClientWithListAndApply[*configv1alpha1.ClusterImagePolicy, *configv1alpha1.ClusterImagePolicyList, *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration] +} + +// newClusterImagePolicies returns a ClusterImagePolicies +func newClusterImagePolicies(c *ConfigV1alpha1Client) *clusterImagePolicies { + return &clusterImagePolicies{ + gentype.NewClientWithListAndApply[*configv1alpha1.ClusterImagePolicy, *configv1alpha1.ClusterImagePolicyList, *applyconfigurationsconfigv1alpha1.ClusterImagePolicyApplyConfiguration]( + "clusterimagepolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1alpha1.ClusterImagePolicy { return &configv1alpha1.ClusterImagePolicy{} }, + func() *configv1alpha1.ClusterImagePolicyList { return &configv1alpha1.ClusterImagePolicyList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go new file mode 100644 index 0000000000000..a5af019600805 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + http "net/http" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ConfigV1alpha1Interface interface { + RESTClient() rest.Interface + BackupsGetter + ClusterImagePoliciesGetter + ImagePoliciesGetter + InsightsDataGathersGetter +} + +// ConfigV1alpha1Client is used to interact with features provided by the config.openshift.io group. 
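Aside: every typed interface in this hunk exposes Apply/ApplyStatus for server-side apply next to the classic Update path. A minimal sketch of how that is typically driven, assuming the generated apply-configuration constructors (Proxy, ProxySpec, WithHTTPProxy) follow the usual client-gen pattern; the kubeconfig path, field manager name, and proxy URL are illustrative, not taken from this diff:

package main

import (
	"context"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	client, err := configv1client.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Declare only the fields this manager owns; the server merges the rest.
	proxy := applyconfigv1.Proxy("cluster").
		WithSpec(applyconfigv1.ProxySpec().
			WithHTTPProxy("http://proxy.example.com:3128"))
	if _, err := client.Proxies().Apply(context.TODO(), proxy,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true}); err != nil {
		panic(err)
	}
}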
+type ConfigV1alpha1Client struct { + restClient rest.Interface +} + +func (c *ConfigV1alpha1Client) Backups() BackupInterface { + return newBackups(c) +} + +func (c *ConfigV1alpha1Client) ClusterImagePolicies() ClusterImagePolicyInterface { + return newClusterImagePolicies(c) +} + +func (c *ConfigV1alpha1Client) ImagePolicies(namespace string) ImagePolicyInterface { + return newImagePolicies(c, namespace) +} + +func (c *ConfigV1alpha1Client) InsightsDataGathers() InsightsDataGatherInterface { + return newInsightsDataGathers(c) +} + +// NewForConfig creates a new ConfigV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ConfigV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ConfigV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ConfigV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new ConfigV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ConfigV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ConfigV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *ConfigV1alpha1Client { + return &ConfigV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := configv1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ConfigV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go new file mode 100644 index 0000000000000..93a7ca4e0e2b9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
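Aside: config_client.go above defines the full construction path (NewForConfig applies setConfigDefaults, then builds the REST client), so a short end-to-end sketch may help. The kubeconfig path is an assumption and error handling is reduced to panics for brevity:

package main

import (
	"context"
	"fmt"

	configv1alpha1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is an assumption).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig wires in the group/version and codecs via setConfigDefaults.
	client, err := configv1alpha1client.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	backups, err := client.Backups().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, b := range backups.Items {
		fmt.Println(b.Name)
	}
}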
+package v1alpha1 diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go new file mode 100644 index 0000000000000..2b5ba4c8e4422 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_backup.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_backup.go new file mode 100644 index 0000000000000..91db422ac31f6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_backup.go @@ -0,0 +1,35 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + typedconfigv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + gentype "k8s.io/client-go/gentype" +) + +// fakeBackups implements BackupInterface +type fakeBackups struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.Backup, *v1alpha1.BackupList, *configv1alpha1.BackupApplyConfiguration] + Fake *FakeConfigV1alpha1 +} + +func newFakeBackups(fake *FakeConfigV1alpha1) typedconfigv1alpha1.BackupInterface { + return &fakeBackups{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.Backup, *v1alpha1.BackupList, *configv1alpha1.BackupApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("backups"), + v1alpha1.SchemeGroupVersion.WithKind("Backup"), + func() *v1alpha1.Backup { return &v1alpha1.Backup{} }, + func() *v1alpha1.BackupList { return &v1alpha1.BackupList{} }, + func(dst, src *v1alpha1.BackupList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.BackupList) []*v1alpha1.Backup { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.BackupList, items []*v1alpha1.Backup) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_clusterimagepolicy.go new file mode 100644 index 0000000000000..50d94e1a91358 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_clusterimagepolicy.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + typedconfigv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + gentype "k8s.io/client-go/gentype" +) + +// fakeClusterImagePolicies implements ClusterImagePolicyInterface +type fakeClusterImagePolicies struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ClusterImagePolicy, *v1alpha1.ClusterImagePolicyList, *configv1alpha1.ClusterImagePolicyApplyConfiguration] + Fake *FakeConfigV1alpha1 +} + +func newFakeClusterImagePolicies(fake *FakeConfigV1alpha1) typedconfigv1alpha1.ClusterImagePolicyInterface { + return &fakeClusterImagePolicies{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ClusterImagePolicy, *v1alpha1.ClusterImagePolicyList, *configv1alpha1.ClusterImagePolicyApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("clusterimagepolicies"), + v1alpha1.SchemeGroupVersion.WithKind("ClusterImagePolicy"), + func() *v1alpha1.ClusterImagePolicy { return &v1alpha1.ClusterImagePolicy{} }, + func() *v1alpha1.ClusterImagePolicyList { return &v1alpha1.ClusterImagePolicyList{} }, + func(dst, src *v1alpha1.ClusterImagePolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ClusterImagePolicyList) []*v1alpha1.ClusterImagePolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ClusterImagePolicyList, items []*v1alpha1.ClusterImagePolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go new file mode 100644 index 0000000000000..927818794a5f3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go @@ -0,0 +1,36 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeConfigV1alpha1 struct { + *testing.Fake +} + +func (c *FakeConfigV1alpha1) Backups() v1alpha1.BackupInterface { + return newFakeBackups(c) +} + +func (c *FakeConfigV1alpha1) ClusterImagePolicies() v1alpha1.ClusterImagePolicyInterface { + return newFakeClusterImagePolicies(c) +} + +func (c *FakeConfigV1alpha1) ImagePolicies(namespace string) v1alpha1.ImagePolicyInterface { + return newFakeImagePolicies(c, namespace) +} + +func (c *FakeConfigV1alpha1) InsightsDataGathers() v1alpha1.InsightsDataGatherInterface { + return newFakeInsightsDataGathers(c) +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
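Aside: the fake_* files above are what back unit tests against this clientset. A minimal sketch, assuming the companion generated fake clientset package (github.com/openshift/client-go/config/clientset/versioned/fake, not shown in this hunk) with its usual NewSimpleClientset constructor:

package fake_test

import (
	"context"
	"testing"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	fakeclient "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListBackups(t *testing.T) {
	// Seed the in-memory tracker with one Backup object.
	seed := &configv1alpha1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "nightly"}}
	cs := fakeclient.NewSimpleClientset(seed)

	list, err := cs.ConfigV1alpha1().Backups().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 || list.Items[0].Name != "nightly" {
		t.Fatalf("unexpected list: %+v", list.Items)
	}
}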
+func (c *FakeConfigV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_imagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_imagepolicy.go new file mode 100644 index 0000000000000..9bf6cb9c046b3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_imagepolicy.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + typedconfigv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + gentype "k8s.io/client-go/gentype" +) + +// fakeImagePolicies implements ImagePolicyInterface +type fakeImagePolicies struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.ImagePolicy, *v1alpha1.ImagePolicyList, *configv1alpha1.ImagePolicyApplyConfiguration] + Fake *FakeConfigV1alpha1 +} + +func newFakeImagePolicies(fake *FakeConfigV1alpha1, namespace string) typedconfigv1alpha1.ImagePolicyInterface { + return &fakeImagePolicies{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.ImagePolicy, *v1alpha1.ImagePolicyList, *configv1alpha1.ImagePolicyApplyConfiguration]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("imagepolicies"), + v1alpha1.SchemeGroupVersion.WithKind("ImagePolicy"), + func() *v1alpha1.ImagePolicy { return &v1alpha1.ImagePolicy{} }, + func() *v1alpha1.ImagePolicyList { return &v1alpha1.ImagePolicyList{} }, + func(dst, src *v1alpha1.ImagePolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.ImagePolicyList) []*v1alpha1.ImagePolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.ImagePolicyList, items []*v1alpha1.ImagePolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go new file mode 100644 index 0000000000000..fc68d168e5434 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + typedconfigv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + gentype "k8s.io/client-go/gentype" +) + +// fakeInsightsDataGathers implements InsightsDataGatherInterface +type fakeInsightsDataGathers struct { + *gentype.FakeClientWithListAndApply[*v1alpha1.InsightsDataGather, *v1alpha1.InsightsDataGatherList, *configv1alpha1.InsightsDataGatherApplyConfiguration] + Fake *FakeConfigV1alpha1 +} + +func newFakeInsightsDataGathers(fake *FakeConfigV1alpha1) typedconfigv1alpha1.InsightsDataGatherInterface { + return &fakeInsightsDataGathers{ + gentype.NewFakeClientWithListAndApply[*v1alpha1.InsightsDataGather, *v1alpha1.InsightsDataGatherList, *configv1alpha1.InsightsDataGatherApplyConfiguration]( + fake.Fake, + "", + v1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers"), + v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGather"), + func() *v1alpha1.InsightsDataGather { return &v1alpha1.InsightsDataGather{} }, + func() *v1alpha1.InsightsDataGatherList { return &v1alpha1.InsightsDataGatherList{} }, + func(dst, src *v1alpha1.InsightsDataGatherList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.InsightsDataGatherList) []*v1alpha1.InsightsDataGather { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1alpha1.InsightsDataGatherList, items []*v1alpha1.InsightsDataGather) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000000..3a69741b1de6b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go @@ -0,0 +1,11 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type BackupExpansion interface{} + +type ClusterImagePolicyExpansion interface{} + +type ImagePolicyExpansion interface{} + +type InsightsDataGatherExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go new file mode 100644 index 0000000000000..a893efeea750a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImagePoliciesGetter has a method to return a ImagePolicyInterface. +// A group's client should implement this interface. +type ImagePoliciesGetter interface { + ImagePolicies(namespace string) ImagePolicyInterface +} + +// ImagePolicyInterface has methods to work with ImagePolicy resources. 
+type ImagePolicyInterface interface { + Create(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicy, opts v1.CreateOptions) (*configv1alpha1.ImagePolicy, error) + Update(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ImagePolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicy, opts v1.UpdateOptions) (*configv1alpha1.ImagePolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.ImagePolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.ImagePolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.ImagePolicy, err error) + Apply(ctx context.Context, imagePolicy *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ImagePolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, imagePolicy *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.ImagePolicy, err error) + ImagePolicyExpansion +} + +// imagePolicies implements ImagePolicyInterface +type imagePolicies struct { + *gentype.ClientWithListAndApply[*configv1alpha1.ImagePolicy, *configv1alpha1.ImagePolicyList, *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration] +} + +// newImagePolicies returns a ImagePolicies +func newImagePolicies(c *ConfigV1alpha1Client, namespace string) *imagePolicies { + return &imagePolicies{ + gentype.NewClientWithListAndApply[*configv1alpha1.ImagePolicy, *configv1alpha1.ImagePolicyList, *applyconfigurationsconfigv1alpha1.ImagePolicyApplyConfiguration]( + "imagepolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *configv1alpha1.ImagePolicy { return &configv1alpha1.ImagePolicy{} }, + func() *configv1alpha1.ImagePolicyList { return &configv1alpha1.ImagePolicyList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go new file mode 100644 index 0000000000000..cff76db8de046 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + applyconfigurationsconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// InsightsDataGathersGetter has a method to return a InsightsDataGatherInterface. +// A group's client should implement this interface. 
+type InsightsDataGathersGetter interface { + InsightsDataGathers() InsightsDataGatherInterface +} + +// InsightsDataGatherInterface has methods to work with InsightsDataGather resources. +type InsightsDataGatherInterface interface { + Create(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGather, opts v1.CreateOptions) (*configv1alpha1.InsightsDataGather, error) + Update(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*configv1alpha1.InsightsDataGather, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*configv1alpha1.InsightsDataGather, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha1.InsightsDataGather, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha1.InsightsDataGatherList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha1.InsightsDataGather, err error) + Apply(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.InsightsDataGather, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha1.InsightsDataGather, err error) + InsightsDataGatherExpansion +} + +// insightsDataGathers implements InsightsDataGatherInterface +type insightsDataGathers struct { + *gentype.ClientWithListAndApply[*configv1alpha1.InsightsDataGather, *configv1alpha1.InsightsDataGatherList, *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration] +} + +// newInsightsDataGathers returns a InsightsDataGathers +func newInsightsDataGathers(c *ConfigV1alpha1Client) *insightsDataGathers { + return &insightsDataGathers{ + gentype.NewClientWithListAndApply[*configv1alpha1.InsightsDataGather, *configv1alpha1.InsightsDataGatherList, *applyconfigurationsconfigv1alpha1.InsightsDataGatherApplyConfiguration]( + "insightsdatagathers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1alpha1.InsightsDataGather { return &configv1alpha1.InsightsDataGather{} }, + func() *configv1alpha1.InsightsDataGatherList { return &configv1alpha1.InsightsDataGatherList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go new file mode 100644 index 0000000000000..3e7e6e8d3bb07 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go @@ -0,0 +1,38 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package config + +import ( + v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + v1alpha1 "github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go new file mode 100644 index 0000000000000..262aa7b0a2c74 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// APIServerInformer provides access to a shared informer and lister for +// APIServers. +type APIServerInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.APIServerLister +} + +type aPIServerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAPIServerInformer constructs a new informer for APIServer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAPIServerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAPIServerInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAPIServerInformer constructs a new informer for APIServer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAPIServerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().APIServers().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().APIServers().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.APIServer{}, + resyncPeriod, + indexers, + ) +} + +func (f *aPIServerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAPIServerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *aPIServerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.APIServer{}, f.defaultInformer) +} + +func (f *aPIServerInformer) Lister() configv1.APIServerLister { + return configv1.NewAPIServerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go new file mode 100644 index 0000000000000..efe2c253e9c48 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// AuthenticationInformer provides access to a shared informer and lister for +// Authentications. +type AuthenticationInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.AuthenticationLister +} + +type authenticationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAuthenticationInformer constructs a new informer for Authentication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAuthenticationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAuthenticationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAuthenticationInformer constructs a new informer for Authentication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAuthenticationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Authentications().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Authentications().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Authentication{}, + resyncPeriod, + indexers, + ) +} + +func (f *authenticationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAuthenticationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *authenticationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Authentication{}, f.defaultInformer) +} + +func (f *authenticationInformer) Lister() configv1.AuthenticationLister { + return configv1.NewAuthenticationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go new file mode 100644 index 0000000000000..451ba252da373 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildInformer provides access to a shared informer and lister for +// Builds. +type BuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.BuildLister +} + +type buildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Builds().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Builds().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Build{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Build{}, f.defaultInformer) +} + +func (f *buildInformer) Lister() configv1.BuildLister { + return configv1.NewBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go new file mode 100644 index 0000000000000..1eda53c8b9e9b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterOperatorInformer provides access to a shared informer and lister for +// ClusterOperators. +type ClusterOperatorInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ClusterOperatorLister +} + +type clusterOperatorInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterOperatorInformer constructs a new informer for ClusterOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterOperatorInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterOperatorInformer constructs a new informer for ClusterOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterOperators().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterOperators().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.ClusterOperator{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterOperatorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterOperatorInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterOperatorInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ClusterOperator{}, f.defaultInformer) +} + +func (f *clusterOperatorInformer) Lister() configv1.ClusterOperatorLister { + return configv1.NewClusterOperatorLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go new file mode 100644 index 0000000000000..c3915175ea4ad --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterVersionInformer provides access to a shared informer and lister for +// ClusterVersions. +type ClusterVersionInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ClusterVersionLister +} + +type clusterVersionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterVersionInformer constructs a new informer for ClusterVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterVersionInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterVersionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterVersionInformer constructs a new informer for ClusterVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterVersionInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterVersions().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterVersions().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.ClusterVersion{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterVersionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterVersionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterVersionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ClusterVersion{}, f.defaultInformer) +} + +func (f *clusterVersionInformer) Lister() configv1.ClusterVersionLister { + return configv1.NewClusterVersionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go new file mode 100644 index 0000000000000..05a36ec0a1fb0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ConsoleInformer provides access to a shared informer and lister for +// Consoles. +type ConsoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ConsoleLister +} + +type consoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewConsoleInformer constructs a new informer for Console type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConsoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConsoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredConsoleInformer constructs a new informer for Console type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
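Because every constructor accepts a `cache.Indexers` map, callers can attach secondary indexes at construction time and later query them straight from the local cache. A sketch under the assumption that `ClusterVersionSpec.Channel` (from openshift/api) is the field of interest; the channel name queried is illustrative:

```go
package main

import (
	"fmt"
	"time"

	apiconfigv1 "github.com/openshift/api/config/v1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := configclient.NewForConfigOrDie(cfg)

	// A custom index keyed on the update channel; ByIndex can then answer
	// "which ClusterVersions sit on channel X?" without touching the API.
	byChannel := func(obj interface{}) ([]string, error) {
		cv, ok := obj.(*apiconfigv1.ClusterVersion)
		if !ok {
			return nil, fmt.Errorf("unexpected type %T", obj)
		}
		return []string{cv.Spec.Channel}, nil
	}
	informer := configinformersv1.NewClusterVersionInformer(
		client, time.Hour, cache.Indexers{"byChannel": byChannel},
	)

	stop := make(chan struct{})
	defer close(stop)
	go informer.Run(stop)
	cache.WaitForCacheSync(stop, informer.HasSynced)

	objs, err := informer.GetIndexer().ByIndex("byChannel", "stable-4.18") // illustrative channel
	if err != nil {
		panic(err)
	}
	fmt.Println(len(objs), "cluster versions on stable-4.18")
}
```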
+func NewFilteredConsoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Consoles().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Consoles().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Console{}, + resyncPeriod, + indexers, + ) +} + +func (f *consoleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConsoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *consoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Console{}, f.defaultInformer) +} + +func (f *consoleInformer) Lister() configv1.ConsoleLister { + return configv1.NewConsoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go new file mode 100644 index 0000000000000..af44dfce98541 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DNSInformer provides access to a shared informer and lister for +// DNSes. +type DNSInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.DNSLister +} + +type dNSInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDNSInformer constructs a new informer for DNS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDNSInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDNSInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDNSInformer constructs a new informer for DNS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDNSInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().DNSes().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().DNSes().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.DNS{}, + resyncPeriod, + indexers, + ) +} + +func (f *dNSInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDNSInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *dNSInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.DNS{}, f.defaultInformer) +} + +func (f *dNSInformer) Lister() configv1.DNSLister { + return configv1.NewDNSLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go new file mode 100644 index 0000000000000..dc1e2050706b4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// FeatureGateInformer provides access to a shared informer and lister for +// FeatureGates. +type FeatureGateInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.FeatureGateLister +} + +type featureGateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewFeatureGateInformer constructs a new informer for FeatureGate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFeatureGateInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredFeatureGateInformer constructs a new informer for FeatureGate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().FeatureGates().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().FeatureGates().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.FeatureGate{}, + resyncPeriod, + indexers, + ) +} + +func (f *featureGateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFeatureGateInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *featureGateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.FeatureGate{}, f.defaultInformer) +} + +func (f *featureGateInformer) Lister() configv1.FeatureGateLister { + return configv1.NewFeatureGateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go new file mode 100644 index 0000000000000..5f68a35ec6b08 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Images().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Images().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() configv1.ImageLister { + return configv1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go new file mode 100644 index 0000000000000..e062099ea29ec --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageContentPolicyInformer provides access to a shared informer and lister for +// ImageContentPolicies. +type ImageContentPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ImageContentPolicyLister +} + +type imageContentPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageContentPolicyInformer constructs a new informer for ImageContentPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageContentPolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageContentPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageContentPolicyInformer constructs a new informer for ImageContentPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
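Each generated file also carries the same `defaultInformer`/`InformerFor` pairing: the shared factory caches informers keyed by object type, so repeated accessor calls hand back the same instance, one local cache, and one watch connection. A small sketch of that deduplication, assuming in-cluster credentials:

```go
package main

import (
	"fmt"
	"time"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := configclient.NewForConfigOrDie(cfg)
	factory := configinformers.NewSharedInformerFactory(client, time.Hour)

	// InformerFor keys on the object type (&apiconfigv1.Image{} above), so
	// both calls below yield the same SharedIndexInformer instance.
	a := factory.Config().V1().Images().Informer()
	b := factory.Config().V1().Images().Informer()
	fmt.Println(a == b) // true: one cache, one watch, shared by all callers
}
```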
+func NewFilteredImageContentPolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageContentPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageContentPolicies().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.ImageContentPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageContentPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageContentPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageContentPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ImageContentPolicy{}, f.defaultInformer) +} + +func (f *imageContentPolicyInformer) Lister() configv1.ImageContentPolicyLister { + return configv1.NewImageContentPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go new file mode 100644 index 0000000000000..0bdadff5b519c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageDigestMirrorSetInformer provides access to a shared informer and lister for +// ImageDigestMirrorSets. +type ImageDigestMirrorSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ImageDigestMirrorSetLister +} + +type imageDigestMirrorSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageDigestMirrorSetInformer constructs a new informer for ImageDigestMirrorSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageDigestMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageDigestMirrorSetInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageDigestMirrorSetInformer constructs a new informer for ImageDigestMirrorSet type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImageDigestMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageDigestMirrorSets().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageDigestMirrorSets().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.ImageDigestMirrorSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageDigestMirrorSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageDigestMirrorSetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageDigestMirrorSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ImageDigestMirrorSet{}, f.defaultInformer) +} + +func (f *imageDigestMirrorSetInformer) Lister() configv1.ImageDigestMirrorSetLister { + return configv1.NewImageDigestMirrorSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go new file mode 100644 index 0000000000000..92bf24f20150a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageTagMirrorSetInformer provides access to a shared informer and lister for +// ImageTagMirrorSets. +type ImageTagMirrorSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ImageTagMirrorSetLister +} + +type imageTagMirrorSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageTagMirrorSetInformer constructs a new informer for ImageTagMirrorSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewImageTagMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageTagMirrorSetInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageTagMirrorSetInformer constructs a new informer for ImageTagMirrorSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImageTagMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageTagMirrorSets().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageTagMirrorSets().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.ImageTagMirrorSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageTagMirrorSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageTagMirrorSetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageTagMirrorSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ImageTagMirrorSet{}, f.defaultInformer) +} + +func (f *imageTagMirrorSetInformer) Lister() configv1.ImageTagMirrorSetLister { + return configv1.NewImageTagMirrorSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go new file mode 100644 index 0000000000000..4891bd24971fb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InfrastructureInformer provides access to a shared informer and lister for +// Infrastructures. +type InfrastructureInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.InfrastructureLister +} + +type infrastructureInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInfrastructureInformer constructs a new informer for Infrastructure type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewInfrastructureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInfrastructureInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInfrastructureInformer constructs a new informer for Infrastructure type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredInfrastructureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Infrastructures().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Infrastructures().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Infrastructure{}, + resyncPeriod, + indexers, + ) +} + +func (f *infrastructureInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInfrastructureInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *infrastructureInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Infrastructure{}, f.defaultInformer) +} + +func (f *infrastructureInformer) Lister() configv1.InfrastructureLister { + return configv1.NewInfrastructureLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go new file mode 100644 index 0000000000000..59ca11638bee4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// IngressInformer provides access to a shared informer and lister for +// Ingresses. +type IngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.IngressLister +} + +type ingressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
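The `Lister()` accessors read from the informer's local store rather than the API server. Note that `defaultInformer` registers `cache.NamespaceIndex` even though these config types are cluster-scoped; that is benign, since `MetaNamespaceIndexFunc` simply files such objects under the empty namespace. A sketch of a cached read, assuming the usual OpenShift convention that the Infrastructure singleton is named `cluster`:

```go
package main

import (
	"fmt"
	"time"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	factory := configinformers.NewSharedInformerFactory(configclient.NewForConfigOrDie(cfg), 10*time.Minute)

	// Lister() touches Informer() internally, which registers the informer
	// with the factory; it must happen before Start for the cache to run.
	lister := factory.Config().V1().Infrastructures().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Served from the in-memory indexer, not a live GET against the server.
	infra, err := lister.Get("cluster")
	if err != nil {
		panic(err)
	}
	fmt.Println(infra.Status.InfrastructureName)
}
```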
+func NewIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Ingresses().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Ingresses().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Ingress{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Ingress{}, f.defaultInformer) +} + +func (f *ingressInformer) Lister() configv1.IngressLister { + return configv1.NewIngressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go new file mode 100644 index 0000000000000..f49b1d22872ce --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -0,0 +1,169 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // APIServers returns a APIServerInformer. + APIServers() APIServerInformer + // Authentications returns a AuthenticationInformer. + Authentications() AuthenticationInformer + // Builds returns a BuildInformer. + Builds() BuildInformer + // ClusterOperators returns a ClusterOperatorInformer. + ClusterOperators() ClusterOperatorInformer + // ClusterVersions returns a ClusterVersionInformer. + ClusterVersions() ClusterVersionInformer + // Consoles returns a ConsoleInformer. + Consoles() ConsoleInformer + // DNSes returns a DNSInformer. + DNSes() DNSInformer + // FeatureGates returns a FeatureGateInformer. + FeatureGates() FeatureGateInformer + // Images returns a ImageInformer. + Images() ImageInformer + // ImageContentPolicies returns a ImageContentPolicyInformer. + ImageContentPolicies() ImageContentPolicyInformer + // ImageDigestMirrorSets returns a ImageDigestMirrorSetInformer. + ImageDigestMirrorSets() ImageDigestMirrorSetInformer + // ImageTagMirrorSets returns a ImageTagMirrorSetInformer. 
+ ImageTagMirrorSets() ImageTagMirrorSetInformer + // Infrastructures returns a InfrastructureInformer. + Infrastructures() InfrastructureInformer + // Ingresses returns a IngressInformer. + Ingresses() IngressInformer + // Networks returns a NetworkInformer. + Networks() NetworkInformer + // Nodes returns a NodeInformer. + Nodes() NodeInformer + // OAuths returns a OAuthInformer. + OAuths() OAuthInformer + // OperatorHubs returns a OperatorHubInformer. + OperatorHubs() OperatorHubInformer + // Projects returns a ProjectInformer. + Projects() ProjectInformer + // Proxies returns a ProxyInformer. + Proxies() ProxyInformer + // Schedulers returns a SchedulerInformer. + Schedulers() SchedulerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// APIServers returns a APIServerInformer. +func (v *version) APIServers() APIServerInformer { + return &aPIServerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Authentications returns a AuthenticationInformer. +func (v *version) Authentications() AuthenticationInformer { + return &authenticationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Builds returns a BuildInformer. +func (v *version) Builds() BuildInformer { + return &buildInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterOperators returns a ClusterOperatorInformer. +func (v *version) ClusterOperators() ClusterOperatorInformer { + return &clusterOperatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterVersions returns a ClusterVersionInformer. +func (v *version) ClusterVersions() ClusterVersionInformer { + return &clusterVersionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Consoles returns a ConsoleInformer. +func (v *version) Consoles() ConsoleInformer { + return &consoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// DNSes returns a DNSInformer. +func (v *version) DNSes() DNSInformer { + return &dNSInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// FeatureGates returns a FeatureGateInformer. +func (v *version) FeatureGates() FeatureGateInformer { + return &featureGateInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageContentPolicies returns a ImageContentPolicyInformer. +func (v *version) ImageContentPolicies() ImageContentPolicyInformer { + return &imageContentPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageDigestMirrorSets returns a ImageDigestMirrorSetInformer. +func (v *version) ImageDigestMirrorSets() ImageDigestMirrorSetInformer { + return &imageDigestMirrorSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageTagMirrorSets returns a ImageTagMirrorSetInformer. 
+func (v *version) ImageTagMirrorSets() ImageTagMirrorSetInformer { + return &imageTagMirrorSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Infrastructures returns a InfrastructureInformer. +func (v *version) Infrastructures() InfrastructureInformer { + return &infrastructureInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Ingresses returns a IngressInformer. +func (v *version) Ingresses() IngressInformer { + return &ingressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Networks returns a NetworkInformer. +func (v *version) Networks() NetworkInformer { + return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Nodes returns a NodeInformer. +func (v *version) Nodes() NodeInformer { + return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// OAuths returns a OAuthInformer. +func (v *version) OAuths() OAuthInformer { + return &oAuthInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// OperatorHubs returns a OperatorHubInformer. +func (v *version) OperatorHubs() OperatorHubInformer { + return &operatorHubInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Projects returns a ProjectInformer. +func (v *version) Projects() ProjectInformer { + return &projectInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Proxies returns a ProxyInformer. +func (v *version) Proxies() ProxyInformer { + return &proxyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Schedulers returns a SchedulerInformer. +func (v *version) Schedulers() SchedulerInformer { + return &schedulerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go new file mode 100644 index 0000000000000..48e4896defe14 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkInformer provides access to a shared informer and lister for +// Networks. +type NetworkInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.NetworkLister +} + +type networkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNetworkInformer constructs a new informer for Network type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
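The `interface.go` shown above is the per-version hub the factory hands out: `factory.Config().V1()` returns this `Interface`, and each accessor lazily wires a small informer struct back to the shared factory. Consumers can depend on the `Interface` instead of the whole factory, which keeps constructors narrow and easy to fake in tests. A sketch, assuming in-cluster credentials and a hypothetical `proxyWatcher` consumer:

```go
package main

import (
	"time"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	configinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1"
	configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"
	"k8s.io/client-go/rest"
)

// proxyWatcher (hypothetical) depends only on the version Interface, not on
// the factory or the clientset, so a test can hand it a fake implementation.
type proxyWatcher struct {
	proxies configlistersv1.ProxyLister
}

func newProxyWatcher(informers configinformersv1.Interface) *proxyWatcher {
	return &proxyWatcher{proxies: informers.Proxies().Lister()}
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	factory := configinformers.NewSharedInformerFactory(configclient.NewForConfigOrDie(cfg), time.Hour)
	w := newProxyWatcher(factory.Config().V1())
	_ = w

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop) // runs the Proxy informer registered via Lister() above
	factory.WaitForCacheSync(stop)
}
```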
+func NewNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkInformer constructs a new informer for Network type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Networks().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Networks().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Network{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Network{}, f.defaultInformer) +} + +func (f *networkInformer) Lister() configv1.NetworkLister { + return configv1.NewNetworkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go new file mode 100644 index 0000000000000..2cb791b00ca15 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NodeInformer provides access to a shared informer and lister for +// Nodes. +type NodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.NodeLister +} + +type nodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Nodes().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Nodes().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Node{}, + resyncPeriod, + indexers, + ) +} + +func (f *nodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *nodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Node{}, f.defaultInformer) +} + +func (f *nodeInformer) Lister() configv1.NodeLister { + return configv1.NewNodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go new file mode 100644 index 0000000000000..75128769f3e2c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthInformer provides access to a shared informer and lister for +// OAuths. +type OAuthInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.OAuthLister +} + +type oAuthInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOAuthInformer constructs a new informer for OAuth type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOAuthInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOAuthInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOAuthInformer constructs a new informer for OAuth type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredOAuthInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OAuths().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OAuths().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.OAuth{}, + resyncPeriod, + indexers, + ) +} + +func (f *oAuthInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOAuthInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oAuthInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.OAuth{}, f.defaultInformer) +} + +func (f *oAuthInformer) Lister() configv1.OAuthLister { + return configv1.NewOAuthLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go new file mode 100644 index 0000000000000..d2196b2255f3b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OperatorHubInformer provides access to a shared informer and lister for +// OperatorHubs. +type OperatorHubInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.OperatorHubLister +} + +type operatorHubInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOperatorHubInformer constructs a new informer for OperatorHub type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOperatorHubInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOperatorHubInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOperatorHubInformer constructs a new informer for OperatorHub type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredOperatorHubInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OperatorHubs().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OperatorHubs().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.OperatorHub{}, + resyncPeriod, + indexers, + ) +} + +func (f *operatorHubInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOperatorHubInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *operatorHubInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.OperatorHub{}, f.defaultInformer) +} + +func (f *operatorHubInformer) Lister() configv1.OperatorHubLister { + return configv1.NewOperatorHubLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go new file mode 100644 index 0000000000000..0c5604e1e4d3e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ProjectInformer provides access to a shared informer and lister for +// Projects. +type ProjectInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ProjectLister +} + +type projectInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewProjectInformer constructs a new informer for Project type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewProjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredProjectInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredProjectInformer constructs a new informer for Project type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
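When every informer produced by a factory should see the same server-side filter, the factory-level options make the tweak global instead of repeating it per constructor. A sketch, assuming the generated `WithTweakListOptions` factory option and an illustrative label:

```go
package main

import (
	"time"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := configclient.NewForConfigOrDie(cfg)

	// Every informer created through this factory inherits the tweak on its
	// List and Watch calls, so the whole cache observes one label selector.
	factory := configinformers.NewSharedInformerFactoryWithOptions(
		client, time.Hour,
		configinformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "example.com/watched=true" // hypothetical label
		}),
	)

	stop := make(chan struct{})
	defer close(stop)
	factory.Config().V1().Images().Informer() // this informer now inherits the tweak
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
}
```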
+func NewFilteredProjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Projects().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Projects().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Project{}, + resyncPeriod, + indexers, + ) +} + +func (f *projectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredProjectInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *projectInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Project{}, f.defaultInformer) +} + +func (f *projectInformer) Lister() configv1.ProjectLister { + return configv1.NewProjectLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go new file mode 100644 index 0000000000000..aa1c2c551b615 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ProxyInformer provides access to a shared informer and lister for +// Proxies. +type ProxyInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ProxyLister +} + +type proxyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewProxyInformer constructs a new informer for Proxy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewProxyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredProxyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredProxyInformer constructs a new informer for Proxy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredProxyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Proxies().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Proxies().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Proxy{}, + resyncPeriod, + indexers, + ) +} + +func (f *proxyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredProxyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *proxyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Proxy{}, f.defaultInformer) +} + +func (f *proxyInformer) Lister() configv1.ProxyLister { + return configv1.NewProxyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go new file mode 100644 index 0000000000000..0117f2941d2a4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// SchedulerInformer provides access to a shared informer and lister for +// Schedulers. +type SchedulerInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.SchedulerLister +} + +type schedulerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewSchedulerInformer constructs a new informer for Scheduler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSchedulerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSchedulerInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredSchedulerInformer constructs a new informer for Scheduler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredSchedulerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Schedulers().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Schedulers().Watch(context.TODO(), options) + }, + }, + &apiconfigv1.Scheduler{}, + resyncPeriod, + indexers, + ) +} + +func (f *schedulerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSchedulerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *schedulerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.Scheduler{}, f.defaultInformer) +} + +func (f *schedulerInformer) Lister() configv1.SchedulerLister { + return configv1.NewSchedulerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go new file mode 100644 index 0000000000000..bed1857ee8e6a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupInformer provides access to a shared informer and lister for +// Backups. +type BackupInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1alpha1.BackupLister +} + +type backupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewBackupInformer constructs a new informer for Backup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBackupInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupInformer constructs a new informer for Backup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBackupInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().Backups().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().Backups().Watch(context.TODO(), options) + }, + }, + &apiconfigv1alpha1.Backup{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1alpha1.Backup{}, f.defaultInformer) +} + +func (f *backupInformer) Lister() configv1alpha1.BackupLister { + return configv1alpha1.NewBackupLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go new file mode 100644 index 0000000000000..b11866c35f98a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterImagePolicyInformer provides access to a shared informer and lister for +// ClusterImagePolicies. +type ClusterImagePolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1alpha1.ClusterImagePolicyLister +} + +type clusterImagePolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterImagePolicyInformer constructs a new informer for ClusterImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterImagePolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterImagePolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterImagePolicyInformer constructs a new informer for ClusterImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterImagePolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterImagePolicies().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterImagePolicies().Watch(context.TODO(), options) + }, + }, + &apiconfigv1alpha1.ClusterImagePolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterImagePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterImagePolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterImagePolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1alpha1.ClusterImagePolicy{}, f.defaultInformer) +} + +func (f *clusterImagePolicyInformer) Lister() configv1alpha1.ClusterImagePolicyLister { + return configv1alpha1.NewClusterImagePolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go new file mode 100644 index 0000000000000..d6ab02a00099d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImagePolicyInformer provides access to a shared informer and lister for +// ImagePolicies. +type ImagePolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1alpha1.ImagePolicyLister +} + +type imagePolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImagePolicyInformer constructs a new informer for ImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImagePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImagePolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImagePolicyInformer constructs a new informer for ImagePolicy type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImagePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ImagePolicies(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ImagePolicies(namespace).Watch(context.TODO(), options) + }, + }, + &apiconfigv1alpha1.ImagePolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *imagePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImagePolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imagePolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1alpha1.ImagePolicy{}, f.defaultInformer) +} + +func (f *imagePolicyInformer) Lister() configv1alpha1.ImagePolicyLister { + return configv1alpha1.NewImagePolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go new file mode 100644 index 0000000000000..51f09bad2c6f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiconfigv1alpha1 "github.com/openshift/api/config/v1alpha1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherInformer provides access to a shared informer and lister for +// InsightsDataGathers. +type InsightsDataGatherInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1alpha1.InsightsDataGatherLister +} + +type insightsDataGatherInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().InsightsDataGathers().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().InsightsDataGathers().Watch(context.TODO(), options) + }, + }, + &apiconfigv1alpha1.InsightsDataGather{}, + resyncPeriod, + indexers, + ) +} + +func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1alpha1.InsightsDataGather{}, f.defaultInformer) +} + +func (f *insightsDataGatherInformer) Lister() configv1alpha1.InsightsDataGatherLister { + return configv1alpha1.NewInsightsDataGatherLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go new file mode 100644 index 0000000000000..69b5569fa0b06 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go @@ -0,0 +1,50 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Backups returns a BackupInformer. + Backups() BackupInformer + // ClusterImagePolicies returns a ClusterImagePolicyInformer. + ClusterImagePolicies() ClusterImagePolicyInformer + // ImagePolicies returns a ImagePolicyInformer. + ImagePolicies() ImagePolicyInformer + // InsightsDataGathers returns a InsightsDataGatherInformer. + InsightsDataGathers() InsightsDataGatherInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Backups returns a BackupInformer. 
+func (v *version) Backups() BackupInformer { + return &backupInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterImagePolicies returns a ClusterImagePolicyInformer. +func (v *version) ClusterImagePolicies() ClusterImagePolicyInformer { + return &clusterImagePolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImagePolicies returns a ImagePolicyInformer. +func (v *version) ImagePolicies() ImagePolicyInformer { + return &imagePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// InsightsDataGathers returns a InsightsDataGatherInformer. +func (v *version) InsightsDataGathers() InsightsDataGatherInformer { + return &insightsDataGatherInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go new file mode 100644 index 0000000000000..2ecff4860a60d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/config/clientset/versioned" + config "github.com/openshift/client-go/config/informers/externalversions/config" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. 
+func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//		if !ok {
+//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//			return
+//		}
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	// Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+	Config() config.Interface
+}
+
+func (f *sharedInformerFactory) Config() config.Interface {
+	return config.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..cb2395d1ef0d1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go
@@ -0,0 +1,97 @@
+// Code generated by informer-gen. DO NOT EDIT.
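A minimal usage sketch for the factory vendored above, assuming a versioned clientset has already been built elsewhere (for example from an in-cluster rest.Config). The package name, helper name, resync period, and label selector are illustrative, not part of the generated API:

package informerexample

import (
	"context"
	"fmt"
	"os"
	"time"

	versioned "github.com/openshift/client-go/config/clientset/versioned"
	externalversions "github.com/openshift/client-go/config/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// runInformers is a hypothetical helper; the caller supplies the clientset.
func runInformers(ctx context.Context, client versioned.Interface) error {
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute, // defaultResync; illustrative value
		// Optional: filter every List/Watch issued through this factory.
		externalversions.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "example.com/managed=true" // illustrative selector
		}),
	)
	// Shutdown blocks until all informer goroutines exit, which requires
	// ctx.Done() to be closed first.
	defer factory.Shutdown()

	// Request informers before Start; Start only runs what was requested.
	proxies := factory.Config().V1().Proxies()

	factory.Start(ctx.Done())
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
			return fmt.Errorf("cache sync failed")
		}
	}

	// The lister serves reads from the synced local cache, not the server.
	all, err := proxies.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("observed %d Proxy objects\n", len(all))
	return nil
}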
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/config/v1"
+	v1alpha1 "github.com/openshift/api/config/v1alpha1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type.
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=config.openshift.io, Version=v1
+	case v1.SchemeGroupVersion.WithResource("apiservers"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().APIServers().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("authentications"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Authentications().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("builds"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Builds().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("clusteroperators"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterOperators().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("clusterversions"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterVersions().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("consoles"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Consoles().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("dnses"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().DNSes().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("featuregates"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().FeatureGates().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("images"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Images().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("imagecontentpolicies"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageContentPolicies().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("imagedigestmirrorsets"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageDigestMirrorSets().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("imagetagmirrorsets"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageTagMirrorSets().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("infrastructures"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Infrastructures().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("ingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Ingresses().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("networks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Networks().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("nodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Nodes().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("oauths"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().OAuths().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("operatorhubs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().OperatorHubs().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("projects"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Projects().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("proxies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Proxies().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("schedulers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Schedulers().Informer()}, nil + + // Group=config.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("backups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().Backups().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("clusterimagepolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ClusterImagePolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("imagepolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ImagePolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().InsightsDataGathers().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..720235c485020 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/config/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
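The ForResource switch above resolves informers dynamically by GroupVersionResource rather than through the typed accessors. A short sketch of that path, assuming the factory from the earlier snippet; note the informer it creates still needs Start and a cache sync before the lister reflects server state, and any resource missing from the switch returns the "no informer found" error:

package informerexample

import (
	configv1 "github.com/openshift/api/config/v1"
	externalversions "github.com/openshift/client-go/config/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// listClusterOperators is a hypothetical helper showing the generic path,
// useful when the resource type is only known at runtime.
func listClusterOperators(factory externalversions.SharedInformerFactory) (int, error) {
	gvr := configv1.SchemeGroupVersion.WithResource("clusteroperators")
	gi, err := factory.ForResource(gvr) // errors for unregistered resources
	if err != nil {
		return 0, err
	}
	// GenericLister yields runtime.Object values; callers convert as needed.
	objs, err := gi.Lister().List(labels.Everything())
	if err != nil {
		return 0, err
	}
	return len(objs), nil
}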
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go new file mode 100644 index 0000000000000..59c5faa8a9b9f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// APIServerLister helps list APIServers. +// All objects returned here must be treated as read-only. +type APIServerLister interface { + // List lists all APIServers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.APIServer, err error) + // Get retrieves the APIServer from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.APIServer, error) + APIServerListerExpansion +} + +// aPIServerLister implements the APIServerLister interface. +type aPIServerLister struct { + listers.ResourceIndexer[*configv1.APIServer] +} + +// NewAPIServerLister returns a new APIServerLister. +func NewAPIServerLister(indexer cache.Indexer) APIServerLister { + return &aPIServerLister{listers.New[*configv1.APIServer](indexer, configv1.Resource("apiserver"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go new file mode 100644 index 0000000000000..242930e685d33 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// AuthenticationLister helps list Authentications. +// All objects returned here must be treated as read-only. +type AuthenticationLister interface { + // List lists all Authentications in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Authentication, err error) + // Get retrieves the Authentication from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Authentication, error) + AuthenticationListerExpansion +} + +// authenticationLister implements the AuthenticationLister interface. +type authenticationLister struct { + listers.ResourceIndexer[*configv1.Authentication] +} + +// NewAuthenticationLister returns a new AuthenticationLister. 
+func NewAuthenticationLister(indexer cache.Indexer) AuthenticationLister { + return &authenticationLister{listers.New[*configv1.Authentication](indexer, configv1.Resource("authentication"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go new file mode 100644 index 0000000000000..b98accfeed59c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// BuildLister helps list Builds. +// All objects returned here must be treated as read-only. +type BuildLister interface { + // List lists all Builds in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Build, err error) + // Get retrieves the Build from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Build, error) + BuildListerExpansion +} + +// buildLister implements the BuildLister interface. +type buildLister struct { + listers.ResourceIndexer[*configv1.Build] +} + +// NewBuildLister returns a new BuildLister. +func NewBuildLister(indexer cache.Indexer) BuildLister { + return &buildLister{listers.New[*configv1.Build](indexer, configv1.Resource("build"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go new file mode 100644 index 0000000000000..a8eaacf78a678 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterOperatorLister helps list ClusterOperators. +// All objects returned here must be treated as read-only. +type ClusterOperatorLister interface { + // List lists all ClusterOperators in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ClusterOperator, err error) + // Get retrieves the ClusterOperator from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ClusterOperator, error) + ClusterOperatorListerExpansion +} + +// clusterOperatorLister implements the ClusterOperatorLister interface. +type clusterOperatorLister struct { + listers.ResourceIndexer[*configv1.ClusterOperator] +} + +// NewClusterOperatorLister returns a new ClusterOperatorLister. +func NewClusterOperatorLister(indexer cache.Indexer) ClusterOperatorLister { + return &clusterOperatorLister{listers.New[*configv1.ClusterOperator](indexer, configv1.Resource("clusteroperator"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go new file mode 100644 index 0000000000000..9f466ccb9dda5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. 
DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterVersionLister helps list ClusterVersions. +// All objects returned here must be treated as read-only. +type ClusterVersionLister interface { + // List lists all ClusterVersions in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ClusterVersion, err error) + // Get retrieves the ClusterVersion from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ClusterVersion, error) + ClusterVersionListerExpansion +} + +// clusterVersionLister implements the ClusterVersionLister interface. +type clusterVersionLister struct { + listers.ResourceIndexer[*configv1.ClusterVersion] +} + +// NewClusterVersionLister returns a new ClusterVersionLister. +func NewClusterVersionLister(indexer cache.Indexer) ClusterVersionLister { + return &clusterVersionLister{listers.New[*configv1.ClusterVersion](indexer, configv1.Resource("clusterversion"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go new file mode 100644 index 0000000000000..e9d9558e7c62b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ConsoleLister helps list Consoles. +// All objects returned here must be treated as read-only. +type ConsoleLister interface { + // List lists all Consoles in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Console, err error) + // Get retrieves the Console from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Console, error) + ConsoleListerExpansion +} + +// consoleLister implements the ConsoleLister interface. +type consoleLister struct { + listers.ResourceIndexer[*configv1.Console] +} + +// NewConsoleLister returns a new ConsoleLister. +func NewConsoleLister(indexer cache.Indexer) ConsoleLister { + return &consoleLister{listers.New[*configv1.Console](indexer, configv1.Resource("console"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go new file mode 100644 index 0000000000000..95dbcd0821968 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// DNSLister helps list DNSes. +// All objects returned here must be treated as read-only. +type DNSLister interface { + // List lists all DNSes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.DNS, err error) + // Get retrieves the DNS from the index for a given name. 
+ // Objects returned here must be treated as read-only. + Get(name string) (*configv1.DNS, error) + DNSListerExpansion +} + +// dNSLister implements the DNSLister interface. +type dNSLister struct { + listers.ResourceIndexer[*configv1.DNS] +} + +// NewDNSLister returns a new DNSLister. +func NewDNSLister(indexer cache.Indexer) DNSLister { + return &dNSLister{listers.New[*configv1.DNS](indexer, configv1.Resource("dns"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go new file mode 100644 index 0000000000000..b5d6fc088b067 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go @@ -0,0 +1,87 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// APIServerListerExpansion allows custom methods to be added to +// APIServerLister. +type APIServerListerExpansion interface{} + +// AuthenticationListerExpansion allows custom methods to be added to +// AuthenticationLister. +type AuthenticationListerExpansion interface{} + +// BuildListerExpansion allows custom methods to be added to +// BuildLister. +type BuildListerExpansion interface{} + +// ClusterOperatorListerExpansion allows custom methods to be added to +// ClusterOperatorLister. +type ClusterOperatorListerExpansion interface{} + +// ClusterVersionListerExpansion allows custom methods to be added to +// ClusterVersionLister. +type ClusterVersionListerExpansion interface{} + +// ConsoleListerExpansion allows custom methods to be added to +// ConsoleLister. +type ConsoleListerExpansion interface{} + +// DNSListerExpansion allows custom methods to be added to +// DNSLister. +type DNSListerExpansion interface{} + +// FeatureGateListerExpansion allows custom methods to be added to +// FeatureGateLister. +type FeatureGateListerExpansion interface{} + +// ImageListerExpansion allows custom methods to be added to +// ImageLister. +type ImageListerExpansion interface{} + +// ImageContentPolicyListerExpansion allows custom methods to be added to +// ImageContentPolicyLister. +type ImageContentPolicyListerExpansion interface{} + +// ImageDigestMirrorSetListerExpansion allows custom methods to be added to +// ImageDigestMirrorSetLister. +type ImageDigestMirrorSetListerExpansion interface{} + +// ImageTagMirrorSetListerExpansion allows custom methods to be added to +// ImageTagMirrorSetLister. +type ImageTagMirrorSetListerExpansion interface{} + +// InfrastructureListerExpansion allows custom methods to be added to +// InfrastructureLister. +type InfrastructureListerExpansion interface{} + +// IngressListerExpansion allows custom methods to be added to +// IngressLister. +type IngressListerExpansion interface{} + +// NetworkListerExpansion allows custom methods to be added to +// NetworkLister. +type NetworkListerExpansion interface{} + +// NodeListerExpansion allows custom methods to be added to +// NodeLister. +type NodeListerExpansion interface{} + +// OAuthListerExpansion allows custom methods to be added to +// OAuthLister. +type OAuthListerExpansion interface{} + +// OperatorHubListerExpansion allows custom methods to be added to +// OperatorHubLister. +type OperatorHubListerExpansion interface{} + +// ProjectListerExpansion allows custom methods to be added to +// ProjectLister. +type ProjectListerExpansion interface{} + +// ProxyListerExpansion allows custom methods to be added to +// ProxyLister. 
+type ProxyListerExpansion interface{} + +// SchedulerListerExpansion allows custom methods to be added to +// SchedulerLister. +type SchedulerListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go new file mode 100644 index 0000000000000..7cedf7948872c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// FeatureGateLister helps list FeatureGates. +// All objects returned here must be treated as read-only. +type FeatureGateLister interface { + // List lists all FeatureGates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.FeatureGate, err error) + // Get retrieves the FeatureGate from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.FeatureGate, error) + FeatureGateListerExpansion +} + +// featureGateLister implements the FeatureGateLister interface. +type featureGateLister struct { + listers.ResourceIndexer[*configv1.FeatureGate] +} + +// NewFeatureGateLister returns a new FeatureGateLister. +func NewFeatureGateLister(indexer cache.Indexer) FeatureGateLister { + return &featureGateLister{listers.New[*configv1.FeatureGate](indexer, configv1.Resource("featuregate"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go new file mode 100644 index 0000000000000..407415393d9a9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageLister helps list Images. +// All objects returned here must be treated as read-only. +type ImageLister interface { + // List lists all Images in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Image, err error) + // Get retrieves the Image from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Image, error) + ImageListerExpansion +} + +// imageLister implements the ImageLister interface. +type imageLister struct { + listers.ResourceIndexer[*configv1.Image] +} + +// NewImageLister returns a new ImageLister. +func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{listers.New[*configv1.Image](indexer, configv1.Resource("image"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go new file mode 100644 index 0000000000000..75607f9184709 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageContentPolicyLister helps list ImageContentPolicies. +// All objects returned here must be treated as read-only. +type ImageContentPolicyLister interface { + // List lists all ImageContentPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ImageContentPolicy, err error) + // Get retrieves the ImageContentPolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ImageContentPolicy, error) + ImageContentPolicyListerExpansion +} + +// imageContentPolicyLister implements the ImageContentPolicyLister interface. +type imageContentPolicyLister struct { + listers.ResourceIndexer[*configv1.ImageContentPolicy] +} + +// NewImageContentPolicyLister returns a new ImageContentPolicyLister. +func NewImageContentPolicyLister(indexer cache.Indexer) ImageContentPolicyLister { + return &imageContentPolicyLister{listers.New[*configv1.ImageContentPolicy](indexer, configv1.Resource("imagecontentpolicy"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go new file mode 100644 index 0000000000000..027ded8bb3e62 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageDigestMirrorSetLister helps list ImageDigestMirrorSets. +// All objects returned here must be treated as read-only. +type ImageDigestMirrorSetLister interface { + // List lists all ImageDigestMirrorSets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ImageDigestMirrorSet, err error) + // Get retrieves the ImageDigestMirrorSet from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ImageDigestMirrorSet, error) + ImageDigestMirrorSetListerExpansion +} + +// imageDigestMirrorSetLister implements the ImageDigestMirrorSetLister interface. +type imageDigestMirrorSetLister struct { + listers.ResourceIndexer[*configv1.ImageDigestMirrorSet] +} + +// NewImageDigestMirrorSetLister returns a new ImageDigestMirrorSetLister. +func NewImageDigestMirrorSetLister(indexer cache.Indexer) ImageDigestMirrorSetLister { + return &imageDigestMirrorSetLister{listers.New[*configv1.ImageDigestMirrorSet](indexer, configv1.Resource("imagedigestmirrorset"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go new file mode 100644 index 0000000000000..d390bc14ec204 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageTagMirrorSetLister helps list ImageTagMirrorSets. +// All objects returned here must be treated as read-only. +type ImageTagMirrorSetLister interface { + // List lists all ImageTagMirrorSets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ImageTagMirrorSet, err error) + // Get retrieves the ImageTagMirrorSet from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ImageTagMirrorSet, error) + ImageTagMirrorSetListerExpansion +} + +// imageTagMirrorSetLister implements the ImageTagMirrorSetLister interface. +type imageTagMirrorSetLister struct { + listers.ResourceIndexer[*configv1.ImageTagMirrorSet] +} + +// NewImageTagMirrorSetLister returns a new ImageTagMirrorSetLister. +func NewImageTagMirrorSetLister(indexer cache.Indexer) ImageTagMirrorSetLister { + return &imageTagMirrorSetLister{listers.New[*configv1.ImageTagMirrorSet](indexer, configv1.Resource("imagetagmirrorset"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go new file mode 100644 index 0000000000000..48d592a295d26 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// InfrastructureLister helps list Infrastructures. +// All objects returned here must be treated as read-only. +type InfrastructureLister interface { + // List lists all Infrastructures in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Infrastructure, err error) + // Get retrieves the Infrastructure from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Infrastructure, error) + InfrastructureListerExpansion +} + +// infrastructureLister implements the InfrastructureLister interface. +type infrastructureLister struct { + listers.ResourceIndexer[*configv1.Infrastructure] +} + +// NewInfrastructureLister returns a new InfrastructureLister. +func NewInfrastructureLister(indexer cache.Indexer) InfrastructureLister { + return &infrastructureLister{listers.New[*configv1.Infrastructure](indexer, configv1.Resource("infrastructure"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go new file mode 100644 index 0000000000000..81538435fb206 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// IngressLister helps list Ingresses. +// All objects returned here must be treated as read-only. 
+type IngressLister interface { + // List lists all Ingresses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Ingress, err error) + // Get retrieves the Ingress from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Ingress, error) + IngressListerExpansion +} + +// ingressLister implements the IngressLister interface. +type ingressLister struct { + listers.ResourceIndexer[*configv1.Ingress] +} + +// NewIngressLister returns a new IngressLister. +func NewIngressLister(indexer cache.Indexer) IngressLister { + return &ingressLister{listers.New[*configv1.Ingress](indexer, configv1.Resource("ingress"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go new file mode 100644 index 0000000000000..3376a46b11c5c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkLister helps list Networks. +// All objects returned here must be treated as read-only. +type NetworkLister interface { + // List lists all Networks in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Network, err error) + // Get retrieves the Network from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Network, error) + NetworkListerExpansion +} + +// networkLister implements the NetworkLister interface. +type networkLister struct { + listers.ResourceIndexer[*configv1.Network] +} + +// NewNetworkLister returns a new NetworkLister. +func NewNetworkLister(indexer cache.Indexer) NetworkLister { + return &networkLister{listers.New[*configv1.Network](indexer, configv1.Resource("network"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go new file mode 100644 index 0000000000000..2520016a51d02 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// NodeLister helps list Nodes. +// All objects returned here must be treated as read-only. +type NodeLister interface { + // List lists all Nodes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Node, err error) + // Get retrieves the Node from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Node, error) + NodeListerExpansion +} + +// nodeLister implements the NodeLister interface. +type nodeLister struct { + listers.ResourceIndexer[*configv1.Node] +} + +// NewNodeLister returns a new NodeLister. 
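// A minimal usage sketch for the cluster-scoped listers in this package
// (illustrative only; `inf` stands for an already-synced SharedIndexInformer
// for the matching resource and is not part of this generated file):
//
//	lister := NewNodeLister(inf.GetIndexer())
//	node, err := lister.Get("cluster") // config.openshift.io singletons are conventionally named "cluster"
//	if err != nil {
//		// handle not-found etc.
//	}
//	nodes, err := lister.List(labels.Everything())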
+func NewNodeLister(indexer cache.Indexer) NodeLister { + return &nodeLister{listers.New[*configv1.Node](indexer, configv1.Resource("node"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go new file mode 100644 index 0000000000000..5cffcd7bf6921 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthLister helps list OAuths. +// All objects returned here must be treated as read-only. +type OAuthLister interface { + // List lists all OAuths in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.OAuth, err error) + // Get retrieves the OAuth from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.OAuth, error) + OAuthListerExpansion +} + +// oAuthLister implements the OAuthLister interface. +type oAuthLister struct { + listers.ResourceIndexer[*configv1.OAuth] +} + +// NewOAuthLister returns a new OAuthLister. +func NewOAuthLister(indexer cache.Indexer) OAuthLister { + return &oAuthLister{listers.New[*configv1.OAuth](indexer, configv1.Resource("oauth"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go new file mode 100644 index 0000000000000..a28f63f7917e8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// OperatorHubLister helps list OperatorHubs. +// All objects returned here must be treated as read-only. +type OperatorHubLister interface { + // List lists all OperatorHubs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.OperatorHub, err error) + // Get retrieves the OperatorHub from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.OperatorHub, error) + OperatorHubListerExpansion +} + +// operatorHubLister implements the OperatorHubLister interface. +type operatorHubLister struct { + listers.ResourceIndexer[*configv1.OperatorHub] +} + +// NewOperatorHubLister returns a new OperatorHubLister. +func NewOperatorHubLister(indexer cache.Indexer) OperatorHubLister { + return &operatorHubLister{listers.New[*configv1.OperatorHub](indexer, configv1.Resource("operatorhub"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go new file mode 100644 index 0000000000000..fbc57217fa79d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ProjectLister helps list Projects. +// All objects returned here must be treated as read-only. +type ProjectLister interface { + // List lists all Projects in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Project, err error) + // Get retrieves the Project from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Project, error) + ProjectListerExpansion +} + +// projectLister implements the ProjectLister interface. +type projectLister struct { + listers.ResourceIndexer[*configv1.Project] +} + +// NewProjectLister returns a new ProjectLister. +func NewProjectLister(indexer cache.Indexer) ProjectLister { + return &projectLister{listers.New[*configv1.Project](indexer, configv1.Resource("project"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go new file mode 100644 index 0000000000000..8edbd0fab53a3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ProxyLister helps list Proxies. +// All objects returned here must be treated as read-only. +type ProxyLister interface { + // List lists all Proxies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Proxy, err error) + // Get retrieves the Proxy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.Proxy, error) + ProxyListerExpansion +} + +// proxyLister implements the ProxyLister interface. +type proxyLister struct { + listers.ResourceIndexer[*configv1.Proxy] +} + +// NewProxyLister returns a new ProxyLister. +func NewProxyLister(indexer cache.Indexer) ProxyLister { + return &proxyLister{listers.New[*configv1.Proxy](indexer, configv1.Resource("proxy"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go new file mode 100644 index 0000000000000..a90829c8dd120 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// SchedulerLister helps list Schedulers. +// All objects returned here must be treated as read-only. +type SchedulerLister interface { + // List lists all Schedulers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.Scheduler, err error) + // Get retrieves the Scheduler from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*configv1.Scheduler, error) + SchedulerListerExpansion +} + +// schedulerLister implements the SchedulerLister interface. +type schedulerLister struct { + listers.ResourceIndexer[*configv1.Scheduler] +} + +// NewSchedulerLister returns a new SchedulerLister. +func NewSchedulerLister(indexer cache.Indexer) SchedulerLister { + return &schedulerLister{listers.New[*configv1.Scheduler](indexer, configv1.Resource("scheduler"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go new file mode 100644 index 0000000000000..6b992e0d03a65 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// BackupLister helps list Backups. +// All objects returned here must be treated as read-only. +type BackupLister interface { + // List lists all Backups in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1alpha1.Backup, err error) + // Get retrieves the Backup from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1alpha1.Backup, error) + BackupListerExpansion +} + +// backupLister implements the BackupLister interface. +type backupLister struct { + listers.ResourceIndexer[*configv1alpha1.Backup] +} + +// NewBackupLister returns a new BackupLister. +func NewBackupLister(indexer cache.Indexer) BackupLister { + return &backupLister{listers.New[*configv1alpha1.Backup](indexer, configv1alpha1.Resource("backup"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go new file mode 100644 index 0000000000000..0512d3682f0c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterImagePolicyLister helps list ClusterImagePolicies. +// All objects returned here must be treated as read-only. +type ClusterImagePolicyLister interface { + // List lists all ClusterImagePolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1alpha1.ClusterImagePolicy, err error) + // Get retrieves the ClusterImagePolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1alpha1.ClusterImagePolicy, error) + ClusterImagePolicyListerExpansion +} + +// clusterImagePolicyLister implements the ClusterImagePolicyLister interface. +type clusterImagePolicyLister struct { + listers.ResourceIndexer[*configv1alpha1.ClusterImagePolicy] +} + +// NewClusterImagePolicyLister returns a new ClusterImagePolicyLister. 
+func NewClusterImagePolicyLister(indexer cache.Indexer) ClusterImagePolicyLister { + return &clusterImagePolicyLister{listers.New[*configv1alpha1.ClusterImagePolicy](indexer, configv1alpha1.Resource("clusterimagepolicy"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000000..97e64a7cc977e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// BackupListerExpansion allows custom methods to be added to +// BackupLister. +type BackupListerExpansion interface{} + +// ClusterImagePolicyListerExpansion allows custom methods to be added to +// ClusterImagePolicyLister. +type ClusterImagePolicyListerExpansion interface{} + +// ImagePolicyListerExpansion allows custom methods to be added to +// ImagePolicyLister. +type ImagePolicyListerExpansion interface{} + +// ImagePolicyNamespaceListerExpansion allows custom methods to be added to +// ImagePolicyNamespaceLister. +type ImagePolicyNamespaceListerExpansion interface{} + +// InsightsDataGatherListerExpansion allows custom methods to be added to +// InsightsDataGatherLister. +type InsightsDataGatherListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go new file mode 100644 index 0000000000000..7050c577186f1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImagePolicyLister helps list ImagePolicies. +// All objects returned here must be treated as read-only. +type ImagePolicyLister interface { + // List lists all ImagePolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1alpha1.ImagePolicy, err error) + // ImagePolicies returns an object that can list and get ImagePolicies. + ImagePolicies(namespace string) ImagePolicyNamespaceLister + ImagePolicyListerExpansion +} + +// imagePolicyLister implements the ImagePolicyLister interface. +type imagePolicyLister struct { + listers.ResourceIndexer[*configv1alpha1.ImagePolicy] +} + +// NewImagePolicyLister returns a new ImagePolicyLister. +func NewImagePolicyLister(indexer cache.Indexer) ImagePolicyLister { + return &imagePolicyLister{listers.New[*configv1alpha1.ImagePolicy](indexer, configv1alpha1.Resource("imagepolicy"))} +} + +// ImagePolicies returns an object that can list and get ImagePolicies. +func (s *imagePolicyLister) ImagePolicies(namespace string) ImagePolicyNamespaceLister { + return imagePolicyNamespaceLister{listers.NewNamespaced[*configv1alpha1.ImagePolicy](s.ResourceIndexer, namespace)} +} + +// ImagePolicyNamespaceLister helps list and get ImagePolicies. +// All objects returned here must be treated as read-only. +type ImagePolicyNamespaceLister interface { + // List lists all ImagePolicies in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
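// A usage sketch for the namespaced lister pattern below (illustrative only;
// `inf` and the namespace are assumptions): the top-level lister is first
// scoped to a single namespace, then queried there:
//
//	nsLister := NewImagePolicyLister(inf.GetIndexer()).ImagePolicies("openshift-config")
//	policies, err := nsLister.List(labels.Everything())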
+ List(selector labels.Selector) (ret []*configv1alpha1.ImagePolicy, err error) + // Get retrieves the ImagePolicy from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1alpha1.ImagePolicy, error) + ImagePolicyNamespaceListerExpansion +} + +// imagePolicyNamespaceLister implements the ImagePolicyNamespaceLister +// interface. +type imagePolicyNamespaceLister struct { + listers.ResourceIndexer[*configv1alpha1.ImagePolicy] +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go new file mode 100644 index 0000000000000..9328022a46ba3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherLister helps list InsightsDataGathers. +// All objects returned here must be treated as read-only. +type InsightsDataGatherLister interface { + // List lists all InsightsDataGathers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1alpha1.InsightsDataGather, err error) + // Get retrieves the InsightsDataGather from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1alpha1.InsightsDataGather, error) + InsightsDataGatherListerExpansion +} + +// insightsDataGatherLister implements the InsightsDataGatherLister interface. +type insightsDataGatherLister struct { + listers.ResourceIndexer[*configv1alpha1.InsightsDataGather] +} + +// NewInsightsDataGatherLister returns a new InsightsDataGatherLister. +func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister { + return &insightsDataGatherLister{listers.New[*configv1alpha1.InsightsDataGather](indexer, configv1alpha1.Resource("insightsdatagather"))} +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go new file mode 100644 index 0000000000000..d3110a3e6b077 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go @@ -0,0 +1,336 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" + internal "github.com/openshift/client-go/image/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageApplyConfiguration represents a declarative configuration of the Image type for use +// with apply. 
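// A sketch of typical builder usage with server-side apply (illustrative only;
// `client` is an assumed typed clientset for image.openshift.io, `ctx` is an
// assumed context, and the digest, reference and field manager values are made up):
//
//	img := Image("sha256:abc123").
//		WithDockerImageReference("quay.io/example/app@sha256:abc123").
//		WithLabels(map[string]string{"team": "build"})
//	applied, err := client.ImageV1().Images().Apply(ctx, img, apismetav1.ApplyOptions{FieldManager: "example-manager"})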
+type ImageApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DockerImageReference *string `json:"dockerImageReference,omitempty"` + DockerImageMetadata *runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion *string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest *string `json:"dockerImageManifest,omitempty"` + DockerImageLayers []ImageLayerApplyConfiguration `json:"dockerImageLayers,omitempty"` + Signatures []ImageSignatureApplyConfiguration `json:"signatures,omitempty"` + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty"` + DockerImageManifestMediaType *string `json:"dockerImageManifestMediaType,omitempty"` + DockerImageConfig *string `json:"dockerImageConfig,omitempty"` + DockerImageManifests []ImageManifestApplyConfiguration `json:"dockerImageManifests,omitempty"` +} + +// Image constructs a declarative configuration of the Image type for use with +// apply. +func Image(name string) *ImageApplyConfiguration { + b := &ImageApplyConfiguration{} + b.WithName(name) + b.WithKind("Image") + b.WithAPIVersion("image.openshift.io/v1") + return b +} + +// ExtractImage extracts the applied configuration owned by fieldManager from +// image. If no managedFields are found in image for fieldManager, an +// ImageApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// image must be an unmodified Image API object that was retrieved from the Kubernetes API. +// ExtractImage provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImage(image *imagev1.Image, fieldManager string) (*ImageApplyConfiguration, error) { + return extractImage(image, fieldManager, "") +} + +// ExtractImageStatus is the same as ExtractImage except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractImageStatus(image *imagev1.Image, fieldManager string) (*ImageApplyConfiguration, error) { + return extractImage(image, fieldManager, "status") +} + +func extractImage(image *imagev1.Image, fieldManager string, subresource string) (*ImageApplyConfiguration, error) { + b := &ImageApplyConfiguration{} + err := managedfields.ExtractInto(image, internal.Parser().Type("com.github.openshift.api.image.v1.Image"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(image.Name) + + b.WithKind("Image") + b.WithAPIVersion("image.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithKind(value string) *ImageApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithAPIVersion(value string) *ImageApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithName(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithGenerateName(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithNamespace(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithUID(value types.UID) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithResourceVersion(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ImageApplyConfiguration) WithGeneration(value int64) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *ImageApplyConfiguration) WithLabels(entries map[string]string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageApplyConfiguration) WithAnnotations(entries map[string]string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImageApplyConfiguration) WithFinalizers(values ...string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ImageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithDockerImageReference sets the DockerImageReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageReference field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageReference(value string) *ImageApplyConfiguration { + b.DockerImageReference = &value + return b +} + +// WithDockerImageMetadata sets the DockerImageMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageMetadata field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageMetadata(value runtime.RawExtension) *ImageApplyConfiguration { + b.DockerImageMetadata = &value + return b +} + +// WithDockerImageMetadataVersion sets the DockerImageMetadataVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageMetadataVersion field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithDockerImageMetadataVersion(value string) *ImageApplyConfiguration { + b.DockerImageMetadataVersion = &value + return b +} + +// WithDockerImageManifest sets the DockerImageManifest field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageManifest field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageManifest(value string) *ImageApplyConfiguration { + b.DockerImageManifest = &value + return b +} + +// WithDockerImageLayers adds the given value to the DockerImageLayers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DockerImageLayers field. +func (b *ImageApplyConfiguration) WithDockerImageLayers(values ...*ImageLayerApplyConfiguration) *ImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDockerImageLayers") + } + b.DockerImageLayers = append(b.DockerImageLayers, *values[i]) + } + return b +} + +// WithSignatures adds the given value to the Signatures field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Signatures field. +func (b *ImageApplyConfiguration) WithSignatures(values ...*ImageSignatureApplyConfiguration) *ImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSignatures") + } + b.Signatures = append(b.Signatures, *values[i]) + } + return b +} + +// WithDockerImageSignatures adds the given value to the DockerImageSignatures field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DockerImageSignatures field. +func (b *ImageApplyConfiguration) WithDockerImageSignatures(values ...[]byte) *ImageApplyConfiguration { + for i := range values { + b.DockerImageSignatures = append(b.DockerImageSignatures, values[i]) + } + return b +} + +// WithDockerImageManifestMediaType sets the DockerImageManifestMediaType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageManifestMediaType field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageManifestMediaType(value string) *ImageApplyConfiguration { + b.DockerImageManifestMediaType = &value + return b +} + +// WithDockerImageConfig sets the DockerImageConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageConfig field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithDockerImageConfig(value string) *ImageApplyConfiguration { + b.DockerImageConfig = &value + return b +} + +// WithDockerImageManifests adds the given value to the DockerImageManifests field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DockerImageManifests field. +func (b *ImageApplyConfiguration) WithDockerImageManifests(values ...*ImageManifestApplyConfiguration) *ImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDockerImageManifests") + } + b.DockerImageManifests = append(b.DockerImageManifests, *values[i]) + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go new file mode 100644 index 0000000000000..09dc0f931faaf --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageLayerApplyConfiguration represents a declarative configuration of the ImageLayer type for use +// with apply. +type ImageLayerApplyConfiguration struct { + Name *string `json:"name,omitempty"` + LayerSize *int64 `json:"size,omitempty"` + MediaType *string `json:"mediaType,omitempty"` +} + +// ImageLayerApplyConfiguration constructs a declarative configuration of the ImageLayer type for use with +// apply. +func ImageLayer() *ImageLayerApplyConfiguration { + return &ImageLayerApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageLayerApplyConfiguration) WithName(value string) *ImageLayerApplyConfiguration { + b.Name = &value + return b +} + +// WithLayerSize sets the LayerSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LayerSize field is set to the value of the last call. +func (b *ImageLayerApplyConfiguration) WithLayerSize(value int64) *ImageLayerApplyConfiguration { + b.LayerSize = &value + return b +} + +// WithMediaType sets the MediaType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MediaType field is set to the value of the last call.
+func (b *ImageLayerApplyConfiguration) WithMediaType(value string) *ImageLayerApplyConfiguration { + b.MediaType = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go new file mode 100644 index 0000000000000..ecc95d10b1a49 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageLookupPolicyApplyConfiguration represents a declarative configuration of the ImageLookupPolicy type for use +// with apply. +type ImageLookupPolicyApplyConfiguration struct { + Local *bool `json:"local,omitempty"` +} + +// ImageLookupPolicyApplyConfiguration constructs a declarative configuration of the ImageLookupPolicy type for use with +// apply. +func ImageLookupPolicy() *ImageLookupPolicyApplyConfiguration { + return &ImageLookupPolicyApplyConfiguration{} +} + +// WithLocal sets the Local field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Local field is set to the value of the last call. +func (b *ImageLookupPolicyApplyConfiguration) WithLocal(value bool) *ImageLookupPolicyApplyConfiguration { + b.Local = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go new file mode 100644 index 0000000000000..5368f96a6646d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageManifestApplyConfiguration represents a declarative configuration of the ImageManifest type for use +// with apply. +type ImageManifestApplyConfiguration struct { + Digest *string `json:"digest,omitempty"` + MediaType *string `json:"mediaType,omitempty"` + ManifestSize *int64 `json:"manifestSize,omitempty"` + Architecture *string `json:"architecture,omitempty"` + OS *string `json:"os,omitempty"` + Variant *string `json:"variant,omitempty"` +} + +// ImageManifestApplyConfiguration constructs a declarative configuration of the ImageManifest type for use with +// apply. +func ImageManifest() *ImageManifestApplyConfiguration { + return &ImageManifestApplyConfiguration{} +} + +// WithDigest sets the Digest field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Digest field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithDigest(value string) *ImageManifestApplyConfiguration { + b.Digest = &value + return b +} + +// WithMediaType sets the MediaType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MediaType field is set to the value of the last call. 
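// A small composition sketch (values are illustrative): sub-builders such as
// ImageManifest are assembled first and then attached to an Image builder via
// WithDockerImageManifests:
//
//	m := ImageManifest().
//		WithDigest("sha256:deadbeef").
//		WithMediaType("application/vnd.oci.image.manifest.v1+json").
//		WithOS("linux").
//		WithArchitecture("amd64")
//	img := Image("sha256:abc123").WithDockerImageManifests(m)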
+func (b *ImageManifestApplyConfiguration) WithMediaType(value string) *ImageManifestApplyConfiguration { + b.MediaType = &value + return b +} + +// WithManifestSize sets the ManifestSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManifestSize field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithManifestSize(value int64) *ImageManifestApplyConfiguration { + b.ManifestSize = &value + return b +} + +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithArchitecture(value string) *ImageManifestApplyConfiguration { + b.Architecture = &value + return b +} + +// WithOS sets the OS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OS field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithOS(value string) *ImageManifestApplyConfiguration { + b.OS = &value + return b +} + +// WithVariant sets the Variant field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Variant field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithVariant(value string) *ImageManifestApplyConfiguration { + b.Variant = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go new file mode 100644 index 0000000000000..93e5abc04aea6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go @@ -0,0 +1,275 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageSignatureApplyConfiguration represents a declarative configuration of the ImageSignature type for use +// with apply. +type ImageSignatureApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Type *string `json:"type,omitempty"` + Content []byte `json:"content,omitempty"` + Conditions []SignatureConditionApplyConfiguration `json:"conditions,omitempty"` + ImageIdentity *string `json:"imageIdentity,omitempty"` + SignedClaims map[string]string `json:"signedClaims,omitempty"` + Created *apismetav1.Time `json:"created,omitempty"` + IssuedBy *SignatureIssuerApplyConfiguration `json:"issuedBy,omitempty"` + IssuedTo *SignatureSubjectApplyConfiguration `json:"issuedTo,omitempty"` +} + +// ImageSignature constructs a declarative configuration of the ImageSignature type for use with +// apply. 
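// A minimal construction sketch (the name, type string and payload are
// illustrative assumptions; `payload` stands for raw signature bytes):
//
//	sig := ImageSignature("sha256:abc123@mysig").
//		WithType("AtomicImageV1").
//		WithContent(payload...)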
+func ImageSignature(name string) *ImageSignatureApplyConfiguration { + b := &ImageSignatureApplyConfiguration{} + b.WithName(name) + b.WithKind("ImageSignature") + b.WithAPIVersion("image.openshift.io/v1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithKind(value string) *ImageSignatureApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithAPIVersion(value string) *ImageSignatureApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithName(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithGenerateName(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithNamespace(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithUID(value types.UID) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *ImageSignatureApplyConfiguration) WithResourceVersion(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithGeneration(value int64) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *ImageSignatureApplyConfiguration) WithLabels(entries map[string]string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *ImageSignatureApplyConfiguration) WithAnnotations(entries map[string]string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImageSignatureApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImageSignatureApplyConfiguration) WithFinalizers(values ...string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ImageSignatureApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithType(value string) *ImageSignatureApplyConfiguration { + b.Type = &value + return b +} + +// WithContent adds the given value to the Content field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Content field. +func (b *ImageSignatureApplyConfiguration) WithContent(values ...byte) *ImageSignatureApplyConfiguration { + for i := range values { + b.Content = append(b.Content, values[i]) + } + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ImageSignatureApplyConfiguration) WithConditions(values ...*SignatureConditionApplyConfiguration) *ImageSignatureApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithImageIdentity sets the ImageIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageIdentity field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithImageIdentity(value string) *ImageSignatureApplyConfiguration { + b.ImageIdentity = &value + return b +} + +// WithSignedClaims puts the entries into the SignedClaims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the SignedClaims field, +// overwriting an existing map entries in SignedClaims field with the same key. +func (b *ImageSignatureApplyConfiguration) WithSignedClaims(entries map[string]string) *ImageSignatureApplyConfiguration { + if b.SignedClaims == nil && len(entries) > 0 { + b.SignedClaims = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.SignedClaims[k] = v + } + return b +} + +// WithCreated sets the Created field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Created field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithCreated(value apismetav1.Time) *ImageSignatureApplyConfiguration { + b.Created = &value + return b +} + +// WithIssuedBy sets the IssuedBy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IssuedBy field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithIssuedBy(value *SignatureIssuerApplyConfiguration) *ImageSignatureApplyConfiguration { + b.IssuedBy = value + return b +} + +// WithIssuedTo sets the IssuedTo field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IssuedTo field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithIssuedTo(value *SignatureSubjectApplyConfiguration) *ImageSignatureApplyConfiguration { + b.IssuedTo = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageSignatureApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go new file mode 100644 index 0000000000000..ad895e190501a --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
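+
+// Usage sketch (illustrative; the names "my-stream", "demo", and
+// "demo-controller" are placeholders, and the Apply call assumes the typed
+// clientset in github.com/openshift/client-go/image/clientset/versioned
+// exposes an Apply method for image streams):
+//
+//	is := ImageStream("my-stream", "demo").
+//		WithLabels(map[string]string{"app": "demo"}).
+//		WithSpec(ImageStreamSpec().
+//			WithTags(TagReference().WithName("latest")))
+//	// result, err := client.ImageV1().ImageStreams("demo").
+//	//	Apply(ctx, is, apismetav1.ApplyOptions{FieldManager: "demo-controller", Force: true})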
+
+package v1
+
+import (
+	imagev1 "github.com/openshift/api/image/v1"
+	internal "github.com/openshift/client-go/image/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageStreamApplyConfiguration represents a declarative configuration of the ImageStream type for use
+// with apply.
+type ImageStreamApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *ImageStreamSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *ImageStreamStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ImageStream constructs a declarative configuration of the ImageStream type for use with
+// apply.
+func ImageStream(name, namespace string) *ImageStreamApplyConfiguration {
+	b := &ImageStreamApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("ImageStream")
+	b.WithAPIVersion("image.openshift.io/v1")
+	return b
+}
+
+// ExtractImageStream extracts the applied configuration owned by fieldManager from
+// imageStream. If no managedFields are found in imageStream for fieldManager, an
+// ImageStreamApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// imageStream must be an unmodified ImageStream API object that was retrieved from the Kubernetes API.
+// ExtractImageStream provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageStream(imageStream *imagev1.ImageStream, fieldManager string) (*ImageStreamApplyConfiguration, error) {
+	return extractImageStream(imageStream, fieldManager, "")
+}
+
+// ExtractImageStreamStatus is the same as ExtractImageStream except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageStreamStatus(imageStream *imagev1.ImageStream, fieldManager string) (*ImageStreamApplyConfiguration, error) {
+	return extractImageStream(imageStream, fieldManager, "status")
+}
+
+func extractImageStream(imageStream *imagev1.ImageStream, fieldManager string, subresource string) (*ImageStreamApplyConfiguration, error) {
+	b := &ImageStreamApplyConfiguration{}
+	err := managedfields.ExtractInto(imageStream, internal.Parser().Type("com.github.openshift.api.image.v1.ImageStream"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(imageStream.Name)
+	b.WithNamespace(imageStream.Namespace)
+
+	b.WithKind("ImageStream")
+	b.WithAPIVersion("image.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageStreamApplyConfiguration) WithKind(value string) *ImageStreamApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithAPIVersion(value string) *ImageStreamApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithName(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithGenerateName(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithNamespace(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithUID(value types.UID) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithResourceVersion(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ImageStreamApplyConfiguration) WithGeneration(value int64) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageStreamApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageStreamApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageStreamApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ImageStreamApplyConfiguration) WithLabels(entries map[string]string) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageStreamApplyConfiguration) WithAnnotations(entries map[string]string) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageStreamApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageStreamApplyConfiguration) WithFinalizers(values ...string) *ImageStreamApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ImageStreamApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageStreamApplyConfiguration) WithSpec(value *ImageStreamSpecApplyConfiguration) *ImageStreamApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageStreamApplyConfiguration) WithStatus(value *ImageStreamStatusApplyConfiguration) *ImageStreamApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ImageStreamApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go
new file mode 100644
index 0000000000000..97e99ff088afc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go
@@ -0,0 +1,248 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	imagev1 "github.com/openshift/api/image/v1"
+	internal "github.com/openshift/client-go/image/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageStreamMappingApplyConfiguration represents a declarative configuration of the ImageStreamMapping type for use
+// with apply.
+type ImageStreamMappingApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Image                                *ImageApplyConfiguration `json:"image,omitempty"`
+	Tag                                  *string                  `json:"tag,omitempty"`
+}
+
+// ImageStreamMapping constructs a declarative configuration of the ImageStreamMapping type for use with
+// apply.
+func ImageStreamMapping(name, namespace string) *ImageStreamMappingApplyConfiguration {
+	b := &ImageStreamMappingApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("ImageStreamMapping")
+	b.WithAPIVersion("image.openshift.io/v1")
+	return b
+}
+
+// ExtractImageStreamMapping extracts the applied configuration owned by fieldManager from
+// imageStreamMapping. If no managedFields are found in imageStreamMapping for fieldManager, an
+// ImageStreamMappingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// imageStreamMapping must be an unmodified ImageStreamMapping API object that was retrieved from the Kubernetes API.
+// ExtractImageStreamMapping provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageStreamMapping(imageStreamMapping *imagev1.ImageStreamMapping, fieldManager string) (*ImageStreamMappingApplyConfiguration, error) {
+	return extractImageStreamMapping(imageStreamMapping, fieldManager, "")
+}
+
+// ExtractImageStreamMappingStatus is the same as ExtractImageStreamMapping except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageStreamMappingStatus(imageStreamMapping *imagev1.ImageStreamMapping, fieldManager string) (*ImageStreamMappingApplyConfiguration, error) { + return extractImageStreamMapping(imageStreamMapping, fieldManager, "status") +} + +func extractImageStreamMapping(imageStreamMapping *imagev1.ImageStreamMapping, fieldManager string, subresource string) (*ImageStreamMappingApplyConfiguration, error) { + b := &ImageStreamMappingApplyConfiguration{} + err := managedfields.ExtractInto(imageStreamMapping, internal.Parser().Type("com.github.openshift.api.image.v1.ImageStreamMapping"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(imageStreamMapping.Name) + b.WithNamespace(imageStreamMapping.Namespace) + + b.WithKind("ImageStreamMapping") + b.WithAPIVersion("image.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithKind(value string) *ImageStreamMappingApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithAPIVersion(value string) *ImageStreamMappingApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithName(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithGenerateName(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithNamespace(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithUID(value types.UID) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithResourceVersion(value string) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithGeneration(value int64) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ImageStreamMappingApplyConfiguration) WithLabels(entries map[string]string) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ImageStreamMappingApplyConfiguration) WithAnnotations(entries map[string]string) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageStreamMappingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageStreamMappingApplyConfiguration) WithFinalizers(values ...string) *ImageStreamMappingApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ImageStreamMappingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithImage sets the Image field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Image field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithImage(value *ImageApplyConfiguration) *ImageStreamMappingApplyConfiguration {
+	b.Image = value
+	return b
+}
+
+// WithTag sets the Tag field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Tag field is set to the value of the last call.
+func (b *ImageStreamMappingApplyConfiguration) WithTag(value string) *ImageStreamMappingApplyConfiguration {
+	b.Tag = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ImageStreamMappingApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go
new file mode 100644
index 0000000000000..09d777f17340e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageStreamSpecApplyConfiguration represents a declarative configuration of the ImageStreamSpec type for use
+// with apply.
+type ImageStreamSpecApplyConfiguration struct {
+	LookupPolicy          *ImageLookupPolicyApplyConfiguration `json:"lookupPolicy,omitempty"`
+	DockerImageRepository *string                              `json:"dockerImageRepository,omitempty"`
+	Tags                  []TagReferenceApplyConfiguration     `json:"tags,omitempty"`
+}
+
+// ImageStreamSpecApplyConfiguration constructs a declarative configuration of the ImageStreamSpec type for use with
+// apply.
+func ImageStreamSpec() *ImageStreamSpecApplyConfiguration {
+	return &ImageStreamSpecApplyConfiguration{}
+}
+
+// WithLookupPolicy sets the LookupPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LookupPolicy field is set to the value of the last call.
+func (b *ImageStreamSpecApplyConfiguration) WithLookupPolicy(value *ImageLookupPolicyApplyConfiguration) *ImageStreamSpecApplyConfiguration {
+	b.LookupPolicy = value
+	return b
+}
+
+// WithDockerImageRepository sets the DockerImageRepository field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DockerImageRepository field is set to the value of the last call.
+func (b *ImageStreamSpecApplyConfiguration) WithDockerImageRepository(value string) *ImageStreamSpecApplyConfiguration {
+	b.DockerImageRepository = &value
+	return b
+}
+
+// WithTags adds the given value to the Tags field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tags field.
+func (b *ImageStreamSpecApplyConfiguration) WithTags(values ...*TagReferenceApplyConfiguration) *ImageStreamSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithTags")
+		}
+		b.Tags = append(b.Tags, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go
new file mode 100644
index 0000000000000..e2ab24aa8c635
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
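+
+// Usage sketch (illustrative; the repository URL and tag name below are
+// placeholders, not values from this package). Status builders nest the same
+// way as spec builders:
+//
+//	status := ImageStreamStatus().
+//		WithDockerImageRepository("registry.example.com/demo/my-stream").
+//		WithTags(NamedTagEventList().
+//			WithTag("latest").
+//			WithItems(TagEvent().WithGeneration(1)))
+//
+// Note that the variadic With* helpers panic on nil entries, so pass only
+// constructed values.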
+
+package v1
+
+// ImageStreamStatusApplyConfiguration represents a declarative configuration of the ImageStreamStatus type for use
+// with apply.
+type ImageStreamStatusApplyConfiguration struct {
+	DockerImageRepository       *string                               `json:"dockerImageRepository,omitempty"`
+	PublicDockerImageRepository *string                               `json:"publicDockerImageRepository,omitempty"`
+	Tags                        []NamedTagEventListApplyConfiguration `json:"tags,omitempty"`
+}
+
+// ImageStreamStatusApplyConfiguration constructs a declarative configuration of the ImageStreamStatus type for use with
+// apply.
+func ImageStreamStatus() *ImageStreamStatusApplyConfiguration {
+	return &ImageStreamStatusApplyConfiguration{}
+}
+
+// WithDockerImageRepository sets the DockerImageRepository field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DockerImageRepository field is set to the value of the last call.
+func (b *ImageStreamStatusApplyConfiguration) WithDockerImageRepository(value string) *ImageStreamStatusApplyConfiguration {
+	b.DockerImageRepository = &value
+	return b
+}
+
+// WithPublicDockerImageRepository sets the PublicDockerImageRepository field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PublicDockerImageRepository field is set to the value of the last call.
+func (b *ImageStreamStatusApplyConfiguration) WithPublicDockerImageRepository(value string) *ImageStreamStatusApplyConfiguration {
+	b.PublicDockerImageRepository = &value
+	return b
+}
+
+// WithTags adds the given value to the Tags field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tags field.
+func (b *ImageStreamStatusApplyConfiguration) WithTags(values ...*NamedTagEventListApplyConfiguration) *ImageStreamStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithTags")
+		}
+		b.Tags = append(b.Tags, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go
new file mode 100644
index 0000000000000..92b096aad0182
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NamedTagEventListApplyConfiguration represents a declarative configuration of the NamedTagEventList type for use
+// with apply.
+type NamedTagEventListApplyConfiguration struct {
+	Tag        *string                               `json:"tag,omitempty"`
+	Items      []TagEventApplyConfiguration          `json:"items,omitempty"`
+	Conditions []TagEventConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// NamedTagEventListApplyConfiguration constructs a declarative configuration of the NamedTagEventList type for use with
+// apply.
+func NamedTagEventList() *NamedTagEventListApplyConfiguration {
+	return &NamedTagEventListApplyConfiguration{}
+}
+
+// WithTag sets the Tag field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Tag field is set to the value of the last call.
+func (b *NamedTagEventListApplyConfiguration) WithTag(value string) *NamedTagEventListApplyConfiguration {
+	b.Tag = &value
+	return b
+}
+
+// WithItems adds the given value to the Items field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Items field.
+func (b *NamedTagEventListApplyConfiguration) WithItems(values ...*TagEventApplyConfiguration) *NamedTagEventListApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithItems")
+		}
+		b.Items = append(b.Items, *values[i])
+	}
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *NamedTagEventListApplyConfiguration) WithConditions(values ...*TagEventConditionApplyConfiguration) *NamedTagEventListApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go
new file mode 100644
index 0000000000000..232bf7bf96598
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go
@@ -0,0 +1,74 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	imagev1 "github.com/openshift/api/image/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SignatureConditionApplyConfiguration represents a declarative configuration of the SignatureCondition type for use
+// with apply.
+type SignatureConditionApplyConfiguration struct {
+	Type               *imagev1.SignatureConditionType `json:"type,omitempty"`
+	Status             *corev1.ConditionStatus         `json:"status,omitempty"`
+	LastProbeTime      *metav1.Time                    `json:"lastProbeTime,omitempty"`
+	LastTransitionTime *metav1.Time                    `json:"lastTransitionTime,omitempty"`
+	Reason             *string                         `json:"reason,omitempty"`
+	Message            *string                         `json:"message,omitempty"`
+}
+
+// SignatureConditionApplyConfiguration constructs a declarative configuration of the SignatureCondition type for use with
+// apply.
+func SignatureCondition() *SignatureConditionApplyConfiguration {
+	return &SignatureConditionApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *SignatureConditionApplyConfiguration) WithType(value imagev1.SignatureConditionType) *SignatureConditionApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *SignatureConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *SignatureConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastProbeTime sets the LastProbeTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastProbeTime field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithLastProbeTime(value metav1.Time) *SignatureConditionApplyConfiguration { + b.LastProbeTime = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *SignatureConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithReason(value string) *SignatureConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithMessage(value string) *SignatureConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go new file mode 100644 index 0000000000000..1e40e2ab190c7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SignatureGenericEntityApplyConfiguration represents a declarative configuration of the SignatureGenericEntity type for use +// with apply. +type SignatureGenericEntityApplyConfiguration struct { + Organization *string `json:"organization,omitempty"` + CommonName *string `json:"commonName,omitempty"` +} + +// SignatureGenericEntityApplyConfiguration constructs a declarative configuration of the SignatureGenericEntity type for use with +// apply. +func SignatureGenericEntity() *SignatureGenericEntityApplyConfiguration { + return &SignatureGenericEntityApplyConfiguration{} +} + +// WithOrganization sets the Organization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Organization field is set to the value of the last call. 
+func (b *SignatureGenericEntityApplyConfiguration) WithOrganization(value string) *SignatureGenericEntityApplyConfiguration { + b.Organization = &value + return b +} + +// WithCommonName sets the CommonName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CommonName field is set to the value of the last call. +func (b *SignatureGenericEntityApplyConfiguration) WithCommonName(value string) *SignatureGenericEntityApplyConfiguration { + b.CommonName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go new file mode 100644 index 0000000000000..68564dd5f01d1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go @@ -0,0 +1,31 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SignatureIssuerApplyConfiguration represents a declarative configuration of the SignatureIssuer type for use +// with apply. +type SignatureIssuerApplyConfiguration struct { + SignatureGenericEntityApplyConfiguration `json:",inline"` +} + +// SignatureIssuerApplyConfiguration constructs a declarative configuration of the SignatureIssuer type for use with +// apply. +func SignatureIssuer() *SignatureIssuerApplyConfiguration { + return &SignatureIssuerApplyConfiguration{} +} + +// WithOrganization sets the Organization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Organization field is set to the value of the last call. +func (b *SignatureIssuerApplyConfiguration) WithOrganization(value string) *SignatureIssuerApplyConfiguration { + b.SignatureGenericEntityApplyConfiguration.Organization = &value + return b +} + +// WithCommonName sets the CommonName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CommonName field is set to the value of the last call. +func (b *SignatureIssuerApplyConfiguration) WithCommonName(value string) *SignatureIssuerApplyConfiguration { + b.SignatureGenericEntityApplyConfiguration.CommonName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go new file mode 100644 index 0000000000000..bc15e4f373056 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SignatureSubjectApplyConfiguration represents a declarative configuration of the SignatureSubject type for use +// with apply. +type SignatureSubjectApplyConfiguration struct { + SignatureGenericEntityApplyConfiguration `json:",inline"` + PublicKeyID *string `json:"publicKeyID,omitempty"` +} + +// SignatureSubjectApplyConfiguration constructs a declarative configuration of the SignatureSubject type for use with +// apply. 
+func SignatureSubject() *SignatureSubjectApplyConfiguration { + return &SignatureSubjectApplyConfiguration{} +} + +// WithOrganization sets the Organization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Organization field is set to the value of the last call. +func (b *SignatureSubjectApplyConfiguration) WithOrganization(value string) *SignatureSubjectApplyConfiguration { + b.SignatureGenericEntityApplyConfiguration.Organization = &value + return b +} + +// WithCommonName sets the CommonName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CommonName field is set to the value of the last call. +func (b *SignatureSubjectApplyConfiguration) WithCommonName(value string) *SignatureSubjectApplyConfiguration { + b.SignatureGenericEntityApplyConfiguration.CommonName = &value + return b +} + +// WithPublicKeyID sets the PublicKeyID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PublicKeyID field is set to the value of the last call. +func (b *SignatureSubjectApplyConfiguration) WithPublicKeyID(value string) *SignatureSubjectApplyConfiguration { + b.PublicKeyID = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go new file mode 100644 index 0000000000000..d123e74cba855 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TagEventApplyConfiguration represents a declarative configuration of the TagEvent type for use +// with apply. +type TagEventApplyConfiguration struct { + Created *metav1.Time `json:"created,omitempty"` + DockerImageReference *string `json:"dockerImageReference,omitempty"` + Image *string `json:"image,omitempty"` + Generation *int64 `json:"generation,omitempty"` +} + +// TagEventApplyConfiguration constructs a declarative configuration of the TagEvent type for use with +// apply. +func TagEvent() *TagEventApplyConfiguration { + return &TagEventApplyConfiguration{} +} + +// WithCreated sets the Created field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Created field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithCreated(value metav1.Time) *TagEventApplyConfiguration { + b.Created = &value + return b +} + +// WithDockerImageReference sets the DockerImageReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageReference field is set to the value of the last call. 
+func (b *TagEventApplyConfiguration) WithDockerImageReference(value string) *TagEventApplyConfiguration { + b.DockerImageReference = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithImage(value string) *TagEventApplyConfiguration { + b.Image = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithGeneration(value int64) *TagEventApplyConfiguration { + b.Generation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go new file mode 100644 index 0000000000000..ba6193fc3e672 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go @@ -0,0 +1,74 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TagEventConditionApplyConfiguration represents a declarative configuration of the TagEventCondition type for use +// with apply. +type TagEventConditionApplyConfiguration struct { + Type *imagev1.TagEventConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + Generation *int64 `json:"generation,omitempty"` +} + +// TagEventConditionApplyConfiguration constructs a declarative configuration of the TagEventCondition type for use with +// apply. +func TagEventCondition() *TagEventConditionApplyConfiguration { + return &TagEventConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithType(value imagev1.TagEventConditionType) *TagEventConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *TagEventConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *TagEventConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *TagEventConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithReason(value string) *TagEventConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithMessage(value string) *TagEventConditionApplyConfiguration { + b.Message = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithGeneration(value int64) *TagEventConditionApplyConfiguration { + b.Generation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go new file mode 100644 index 0000000000000..996690343c72f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" +) + +// TagImportPolicyApplyConfiguration represents a declarative configuration of the TagImportPolicy type for use +// with apply. +type TagImportPolicyApplyConfiguration struct { + Insecure *bool `json:"insecure,omitempty"` + Scheduled *bool `json:"scheduled,omitempty"` + ImportMode *imagev1.ImportModeType `json:"importMode,omitempty"` +} + +// TagImportPolicyApplyConfiguration constructs a declarative configuration of the TagImportPolicy type for use with +// apply. +func TagImportPolicy() *TagImportPolicyApplyConfiguration { + return &TagImportPolicyApplyConfiguration{} +} + +// WithInsecure sets the Insecure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Insecure field is set to the value of the last call. +func (b *TagImportPolicyApplyConfiguration) WithInsecure(value bool) *TagImportPolicyApplyConfiguration { + b.Insecure = &value + return b +} + +// WithScheduled sets the Scheduled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scheduled field is set to the value of the last call. 
+func (b *TagImportPolicyApplyConfiguration) WithScheduled(value bool) *TagImportPolicyApplyConfiguration { + b.Scheduled = &value + return b +} + +// WithImportMode sets the ImportMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImportMode field is set to the value of the last call. +func (b *TagImportPolicyApplyConfiguration) WithImportMode(value imagev1.ImportModeType) *TagImportPolicyApplyConfiguration { + b.ImportMode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go new file mode 100644 index 0000000000000..648f30e31af44 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go @@ -0,0 +1,87 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// TagReferenceApplyConfiguration represents a declarative configuration of the TagReference type for use +// with apply. +type TagReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + From *corev1.ObjectReference `json:"from,omitempty"` + Reference *bool `json:"reference,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ImportPolicy *TagImportPolicyApplyConfiguration `json:"importPolicy,omitempty"` + ReferencePolicy *TagReferencePolicyApplyConfiguration `json:"referencePolicy,omitempty"` +} + +// TagReferenceApplyConfiguration constructs a declarative configuration of the TagReference type for use with +// apply. +func TagReference() *TagReferenceApplyConfiguration { + return &TagReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithName(value string) *TagReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *TagReferenceApplyConfiguration) WithAnnotations(entries map[string]string) *TagReferenceApplyConfiguration { + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call.
+func (b *TagReferenceApplyConfiguration) WithFrom(value corev1.ObjectReference) *TagReferenceApplyConfiguration { + b.From = &value + return b +} + +// WithReference sets the Reference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reference field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithReference(value bool) *TagReferenceApplyConfiguration { + b.Reference = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithGeneration(value int64) *TagReferenceApplyConfiguration { + b.Generation = &value + return b +} + +// WithImportPolicy sets the ImportPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImportPolicy field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithImportPolicy(value *TagImportPolicyApplyConfiguration) *TagReferenceApplyConfiguration { + b.ImportPolicy = value + return b +} + +// WithReferencePolicy sets the ReferencePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReferencePolicy field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithReferencePolicy(value *TagReferencePolicyApplyConfiguration) *TagReferenceApplyConfiguration { + b.ReferencePolicy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go new file mode 100644 index 0000000000000..d010d3ac0e8e0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" +) + +// TagReferencePolicyApplyConfiguration represents a declarative configuration of the TagReferencePolicy type for use +// with apply. +type TagReferencePolicyApplyConfiguration struct { + Type *imagev1.TagReferencePolicyType `json:"type,omitempty"` +} + +// TagReferencePolicyApplyConfiguration constructs a declarative configuration of the TagReferencePolicy type for use with +// apply. +func TagReferencePolicy() *TagReferencePolicyApplyConfiguration { + return &TagReferencePolicyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
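The generated With* setters above all share one shape: store a pointer to the supplied value and return the receiver, so an apply configuration is assembled by chaining calls. A minimal sketch of how these builders compose (not part of the vendored diff; the imageapply import alias is assumed):

package example

import (
    imageapply "github.com/openshift/client-go/image/applyconfigurations/image/v1"
)

// buildLatestTag chains the generated setters; each stores a pointer to its
// argument and returns the receiver, so the calls compose left to right.
func buildLatestTag() *imageapply.TagReferenceApplyConfiguration {
    return imageapply.TagReference().
        WithName("latest").
        WithReference(true).
        WithImportPolicy(imageapply.TagImportPolicy().
            WithScheduled(true).
            WithInsecure(false))
}

Because every field is a pointer with a json omitempty tag, only explicitly set fields are serialized, which is what lets server-side apply distinguish "unset" from "zero value".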
+func (b *TagReferencePolicyApplyConfiguration) WithType(value imagev1.TagReferencePolicyType) *TagReferencePolicyApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..8c46c4a5e3de8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go @@ -0,0 +1,592 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.image.v1.Image + map: + fields: + - name: apiVersion + type: + scalar: string + - name: dockerImageConfig + type: + scalar: string + - name: dockerImageLayers + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.ImageLayer + elementRelationship: atomic + - name: dockerImageManifest + type: + scalar: string + - name: dockerImageManifestMediaType + type: + scalar: string + - name: dockerImageManifests + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.ImageManifest + elementRelationship: atomic + - name: dockerImageMetadata + type: + namedType: __untyped_atomic_ + - name: dockerImageMetadataVersion + type: + scalar: string + - name: dockerImageReference + type: + scalar: string + - name: dockerImageSignatures + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: signatures + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.ImageSignature + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.image.v1.ImageLayer + map: + fields: + - name: mediaType + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: size + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.image.v1.ImageLookupPolicy + map: + fields: + - name: local + type: + scalar: boolean + default: false +- name: com.github.openshift.api.image.v1.ImageManifest + map: + fields: + - name: architecture + type: + scalar: string + default: "" + - name: digest + type: + scalar: string + default: "" + - name: manifestSize + type: + scalar: numeric + default: 0 + - name: mediaType + type: + scalar: string + default: "" + - name: os + type: + scalar: string + default: "" + - name: variant + type: + scalar: string +- name: com.github.openshift.api.image.v1.ImageSignature + map: + fields: + - name: apiVersion + type: + scalar: string + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.SignatureCondition + elementRelationship: associative + keys: + - type + - name: content + type: + scalar: string + - name: created + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: imageIdentity + type: + scalar: string + - name: issuedBy + type: + namedType: com.github.openshift.api.image.v1.SignatureIssuer + - 
name: issuedTo + type: + namedType: com.github.openshift.api.image.v1.SignatureSubject + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: signedClaims + type: + map: + elementType: + scalar: string + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.ImageStream + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.image.v1.ImageStreamSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.image.v1.ImageStreamStatus + default: {} +- name: com.github.openshift.api.image.v1.ImageStreamMapping + map: + fields: + - name: apiVersion + type: + scalar: string + - name: image + type: + namedType: com.github.openshift.api.image.v1.Image + default: {} + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: tag + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.ImageStreamSpec + map: + fields: + - name: dockerImageRepository + type: + scalar: string + - name: lookupPolicy + type: + namedType: com.github.openshift.api.image.v1.ImageLookupPolicy + default: {} + - name: tags + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.TagReference + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.image.v1.ImageStreamStatus + map: + fields: + - name: dockerImageRepository + type: + scalar: string + default: "" + - name: publicDockerImageRepository + type: + scalar: string + - name: tags + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.NamedTagEventList + elementRelationship: associative + keys: + - tag +- name: com.github.openshift.api.image.v1.NamedTagEventList + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.TagEventCondition + elementRelationship: atomic + - name: items + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.TagEvent + elementRelationship: atomic + - name: tag + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.SignatureCondition + map: + fields: + - name: lastProbeTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.SignatureIssuer + map: + fields: + - name: commonName + type: + scalar: string + - name: organization + type: + scalar: string +- name: com.github.openshift.api.image.v1.SignatureSubject + map: + fields: + - name: commonName + type: + scalar: string + - name: organization + type: + scalar: string + - name: publicKeyID + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.TagEvent + map: + fields: + - name: created + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: dockerImageReference + type: + scalar: string + default: "" + - name: generation + type: + scalar: numeric + default: 0 + - 
name: image + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.TagEventCondition + map: + fields: + - name: generation + type: + scalar: numeric + default: 0 + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.TagImportPolicy + map: + fields: + - name: importMode + type: + scalar: string + - name: insecure + type: + scalar: boolean + - name: scheduled + type: + scalar: boolean +- name: com.github.openshift.api.image.v1.TagReference + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: generation + type: + scalar: numeric + - name: importPolicy + type: + namedType: com.github.openshift.api.image.v1.TagImportPolicy + default: {} + - name: name + type: + scalar: string + default: "" + - name: reference + type: + scalar: boolean + - name: referencePolicy + type: + namedType: com.github.openshift.api.image.v1.TagReferencePolicy + default: {} +- name: com.github.openshift.api.image.v1.TagReferencePolicy + map: + fields: + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + 
elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.runtime.RawExtension + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..982409785f6a6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ImageV1() imagev1.ImageV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + imageV1 *imagev1.ImageV1Client +} + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return c.imageV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.imageV1, err = imagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.imageV1 = imagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..7765404848f5f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + imagev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
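NewForConfig defaults the UserAgent, builds a shared HTTP client via rest.HTTPClientFor, and delegates to NewForConfigAndClient, which also wires in a token-bucket rate limiter from QPS and Burst when none is set. A minimal usage sketch, assuming a kubeconfig path and namespace supplied by the caller:

package example

import (
    "context"
    "fmt"

    imageclient "github.com/openshift/client-go/image/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
)

// listImageStreams builds the typed clientset from a kubeconfig and lists
// ImageStreams in one namespace through the generated ImageV1 client.
func listImageStreams(kubeconfig, namespace string) error {
    cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    if err != nil {
        return err
    }
    cs, err := imageclient.NewForConfig(cfg)
    if err != nil {
        return err
    }
    streams, err := cs.ImageV1().ImageStreams(namespace).List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return err
    }
    fmt.Printf("found %d image streams\n", len(streams.Items))
    return nil
}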
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go new file mode 100644 index 0000000000000..c495ba76e69cd --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go @@ -0,0 +1,19 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ImageExpansion interface{} + +type ImageSignatureExpansion interface{} + +type ImageStreamExpansion interface{} + +type ImageStreamImageExpansion interface{} + +type ImageStreamImportExpansion interface{} + +type ImageStreamMappingExpansion interface{} + +type ImageStreamTagExpansion interface{} + +type ImageTagExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go new file mode 100644 index 0000000000000..6976e8debcc9b --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImagesGetter has a method to return a ImageInterface. +// A group's client should implement this interface. +type ImagesGetter interface { + Images() ImageInterface +} + +// ImageInterface has methods to work with Image resources. 
+type ImageInterface interface { + Create(ctx context.Context, image *imagev1.Image, opts metav1.CreateOptions) (*imagev1.Image, error) + Update(ctx context.Context, image *imagev1.Image, opts metav1.UpdateOptions) (*imagev1.Image, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*imagev1.Image, error) + List(ctx context.Context, opts metav1.ListOptions) (*imagev1.ImageList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *imagev1.Image, err error) + Apply(ctx context.Context, image *applyconfigurationsimagev1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.Image, err error) + ImageExpansion +} + +// images implements ImageInterface +type images struct { + *gentype.ClientWithListAndApply[*imagev1.Image, *imagev1.ImageList, *applyconfigurationsimagev1.ImageApplyConfiguration] +} + +// newImages returns a Images +func newImages(c *ImageV1Client) *images { + return &images{ + gentype.NewClientWithListAndApply[*imagev1.Image, *imagev1.ImageList, *applyconfigurationsimagev1.ImageApplyConfiguration]( + "images", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *imagev1.Image { return &imagev1.Image{} }, + func() *imagev1.ImageList { return &imagev1.ImageList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go new file mode 100644 index 0000000000000..7c93797a55d47 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ImageV1Interface interface { + RESTClient() rest.Interface + ImagesGetter + ImageSignaturesGetter + ImageStreamsGetter + ImageStreamImagesGetter + ImageStreamImportsGetter + ImageStreamMappingsGetter + ImageStreamTagsGetter + ImageTagsGetter +} + +// ImageV1Client is used to interact with features provided by the image.openshift.io group. 
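The Apply method in the interface above takes an apply configuration rather than a full object, enabling server-side apply with field ownership. A hedged sketch using the namespaced ImageStream variant that appears later in this diff; ImageStream(name, namespace), WithSpec, and WithTags follow the usual applyconfiguration-gen pattern but are assumed here, since those declarations sit outside this hunk:

package example

import (
    "context"

    imageapply "github.com/openshift/client-go/image/applyconfigurations/image/v1"
    imageclient "github.com/openshift/client-go/image/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyLatestTag declares desired state for one tag and lets the API server
// merge it, with this field manager owning only the fields it set.
func applyLatestTag(ctx context.Context, cs imageclient.Interface, ns, name string) error {
    stream := imageapply.ImageStream(name, ns). // assumed generated constructor
        WithSpec(imageapply.ImageStreamSpec().  // assumed, per the standard pattern
            WithTags(imageapply.TagReference().
                WithName("latest").
                WithReference(true)))
    _, err := cs.ImageV1().ImageStreams(ns).Apply(ctx, stream,
        metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
    return err
}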
+type ImageV1Client struct { + restClient rest.Interface +} + +func (c *ImageV1Client) Images() ImageInterface { + return newImages(c) +} + +func (c *ImageV1Client) ImageSignatures() ImageSignatureInterface { + return newImageSignatures(c) +} + +func (c *ImageV1Client) ImageStreams(namespace string) ImageStreamInterface { + return newImageStreams(c, namespace) +} + +func (c *ImageV1Client) ImageStreamImages(namespace string) ImageStreamImageInterface { + return newImageStreamImages(c, namespace) +} + +func (c *ImageV1Client) ImageStreamImports(namespace string) ImageStreamImportInterface { + return newImageStreamImports(c, namespace) +} + +func (c *ImageV1Client) ImageStreamMappings(namespace string) ImageStreamMappingInterface { + return newImageStreamMappings(c, namespace) +} + +func (c *ImageV1Client) ImageStreamTags(namespace string) ImageStreamTagInterface { + return newImageStreamTags(c, namespace) +} + +func (c *ImageV1Client) ImageTags(namespace string) ImageTagInterface { + return newImageTags(c, namespace) +} + +// NewForConfig creates a new ImageV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ImageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ImageV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ImageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ImageV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ImageV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ImageV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ImageV1Client for the given RESTClient. +func New(c rest.Interface) *ImageV1Client { + return &ImageV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := imagev1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ImageV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go new file mode 100644 index 0000000000000..a53b63c62f03b --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go @@ -0,0 +1,43 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ImageSignaturesGetter has a method to return a ImageSignatureInterface. +// A group's client should implement this interface. +type ImageSignaturesGetter interface { + ImageSignatures() ImageSignatureInterface +} + +// ImageSignatureInterface has methods to work with ImageSignature resources. +type ImageSignatureInterface interface { + Create(ctx context.Context, imageSignature *imagev1.ImageSignature, opts metav1.CreateOptions) (*imagev1.ImageSignature, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + ImageSignatureExpansion +} + +// imageSignatures implements ImageSignatureInterface +type imageSignatures struct { + *gentype.Client[*imagev1.ImageSignature] +} + +// newImageSignatures returns a ImageSignatures +func newImageSignatures(c *ImageV1Client) *imageSignatures { + return &imageSignatures{ + gentype.NewClient[*imagev1.ImageSignature]( + "imagesignatures", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *imagev1.ImageSignature { return &imagev1.ImageSignature{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go new file mode 100644 index 0000000000000..90fc21df2db3f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go @@ -0,0 +1,89 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImageStreamsGetter has a method to return a ImageStreamInterface. +// A group's client should implement this interface. +type ImageStreamsGetter interface { + ImageStreams(namespace string) ImageStreamInterface +} + +// ImageStreamInterface has methods to work with ImageStream resources. +type ImageStreamInterface interface { + Create(ctx context.Context, imageStream *imagev1.ImageStream, opts metav1.CreateOptions) (*imagev1.ImageStream, error) + Update(ctx context.Context, imageStream *imagev1.ImageStream, opts metav1.UpdateOptions) (*imagev1.ImageStream, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, imageStream *imagev1.ImageStream, opts metav1.UpdateOptions) (*imagev1.ImageStream, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*imagev1.ImageStream, error) + List(ctx context.Context, opts metav1.ListOptions) (*imagev1.ImageStreamList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *imagev1.ImageStream, err error) + Apply(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStream, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStream, err error) + Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (*imagev1.SecretList, error) + Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (*imagev1.ImageStreamLayers, error) + + ImageStreamExpansion +} + +// imageStreams implements ImageStreamInterface +type imageStreams struct { + *gentype.ClientWithListAndApply[*imagev1.ImageStream, *imagev1.ImageStreamList, *applyconfigurationsimagev1.ImageStreamApplyConfiguration] +} + +// newImageStreams returns a ImageStreams +func newImageStreams(c *ImageV1Client, namespace string) *imageStreams { + return &imageStreams{ + gentype.NewClientWithListAndApply[*imagev1.ImageStream, *imagev1.ImageStreamList, *applyconfigurationsimagev1.ImageStreamApplyConfiguration]( + "imagestreams", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageStream { return &imagev1.ImageStream{} }, + func() *imagev1.ImageStreamList { return &imagev1.ImageStreamList{} }, + ), + } +} + +// Secrets takes name of the imageStream, and returns the corresponding imagev1.SecretList object, and an error if there is any. +func (c *imageStreams) Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *imagev1.SecretList, err error) { + result = &imagev1.SecretList{} + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). + Resource("imagestreams"). + Name(imageStreamName). + SubResource("secrets"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// Layers takes name of the imageStream, and returns the corresponding imagev1.ImageStreamLayers object, and an error if there is any. +func (c *imageStreams) Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *imagev1.ImageStreamLayers, err error) { + result = &imagev1.ImageStreamLayers{} + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). + Resource("imagestreams"). + Name(imageStreamName). + SubResource("layers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go new file mode 100644 index 0000000000000..ea329220bb260 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ImageStreamImagesGetter has a method to return a ImageStreamImageInterface. +// A group's client should implement this interface. +type ImageStreamImagesGetter interface { + ImageStreamImages(namespace string) ImageStreamImageInterface +} + +// ImageStreamImageInterface has methods to work with ImageStreamImage resources. +type ImageStreamImageInterface interface { + Get(ctx context.Context, name string, opts metav1.GetOptions) (*imagev1.ImageStreamImage, error) + ImageStreamImageExpansion +} + +// imageStreamImages implements ImageStreamImageInterface +type imageStreamImages struct { + *gentype.Client[*imagev1.ImageStreamImage] +} + +// newImageStreamImages returns a ImageStreamImages +func newImageStreamImages(c *ImageV1Client, namespace string) *imageStreamImages { + return &imageStreamImages{ + gentype.NewClient[*imagev1.ImageStreamImage]( + "imagestreamimages", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageStreamImage { return &imagev1.ImageStreamImage{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go new file mode 100644 index 0000000000000..7fbd420a405a3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ImageStreamImportsGetter has a method to return a ImageStreamImportInterface. +// A group's client should implement this interface. +type ImageStreamImportsGetter interface { + ImageStreamImports(namespace string) ImageStreamImportInterface +} + +// ImageStreamImportInterface has methods to work with ImageStreamImport resources. 
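The Secrets and Layers helpers defined a little earlier issue GET requests directly against the secrets and layers subresources of an imagestream rather than going through the generic list/get helpers. A short sketch of the call sites, assuming cs is a previously constructed clientset:

package example

import (
    "context"

    imagev1 "github.com/openshift/api/image/v1"
    imageclient "github.com/openshift/client-go/image/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fetchStreamDetails reads both subresources exposed by the ImageStream
// client: the pull secrets and the per-image layer metadata.
func fetchStreamDetails(ctx context.Context, cs imageclient.Interface, ns, name string) (*imagev1.SecretList, *imagev1.ImageStreamLayers, error) {
    secrets, err := cs.ImageV1().ImageStreams(ns).Secrets(ctx, name, metav1.GetOptions{})
    if err != nil {
        return nil, nil, err
    }
    layers, err := cs.ImageV1().ImageStreams(ns).Layers(ctx, name, metav1.GetOptions{})
    if err != nil {
        return nil, nil, err
    }
    return secrets, layers, nil
}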
+type ImageStreamImportInterface interface { + Create(ctx context.Context, imageStreamImport *imagev1.ImageStreamImport, opts metav1.CreateOptions) (*imagev1.ImageStreamImport, error) + ImageStreamImportExpansion +} + +// imageStreamImports implements ImageStreamImportInterface +type imageStreamImports struct { + *gentype.Client[*imagev1.ImageStreamImport] +} + +// newImageStreamImports returns a ImageStreamImports +func newImageStreamImports(c *ImageV1Client, namespace string) *imageStreamImports { + return &imageStreamImports{ + gentype.NewClient[*imagev1.ImageStreamImport]( + "imagestreamimports", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageStreamImport { return &imagev1.ImageStreamImport{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go new file mode 100644 index 0000000000000..eae9d77dfd678 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + apiimagev1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ImageStreamMappingsGetter has a method to return a ImageStreamMappingInterface. +// A group's client should implement this interface. +type ImageStreamMappingsGetter interface { + ImageStreamMappings(namespace string) ImageStreamMappingInterface +} + +// ImageStreamMappingInterface has methods to work with ImageStreamMapping resources. +type ImageStreamMappingInterface interface { + Apply(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *apiimagev1.ImageStreamMapping, err error) + Create(ctx context.Context, imageStreamMapping *apiimagev1.ImageStreamMapping, opts metav1.CreateOptions) (*metav1.Status, error) + + ImageStreamMappingExpansion +} + +// imageStreamMappings implements ImageStreamMappingInterface +type imageStreamMappings struct { + *gentype.ClientWithApply[*apiimagev1.ImageStreamMapping, *imagev1.ImageStreamMappingApplyConfiguration] +} + +// newImageStreamMappings returns a ImageStreamMappings +func newImageStreamMappings(c *ImageV1Client, namespace string) *imageStreamMappings { + return &imageStreamMappings{ + gentype.NewClientWithApply[*apiimagev1.ImageStreamMapping, *imagev1.ImageStreamMappingApplyConfiguration]( + "imagestreammappings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *apiimagev1.ImageStreamMapping { return &apiimagev1.ImageStreamMapping{} }, + ), + } +} + +// Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. +func (c *imageStreamMappings) Create(ctx context.Context, imageStreamMapping *apiimagev1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { + result = &metav1.Status{} + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). + Resource("imagestreammappings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStreamMapping). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go new file mode 100644 index 0000000000000..2f09ab86cb13f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go @@ -0,0 +1,47 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ImageStreamTagsGetter has a method to return a ImageStreamTagInterface. +// A group's client should implement this interface. +type ImageStreamTagsGetter interface { + ImageStreamTags(namespace string) ImageStreamTagInterface +} + +// ImageStreamTagInterface has methods to work with ImageStreamTag resources. +type ImageStreamTagInterface interface { + Create(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts metav1.CreateOptions) (*imagev1.ImageStreamTag, error) + Update(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts metav1.UpdateOptions) (*imagev1.ImageStreamTag, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*imagev1.ImageStreamTag, error) + List(ctx context.Context, opts metav1.ListOptions) (*imagev1.ImageStreamTagList, error) + ImageStreamTagExpansion +} + +// imageStreamTags implements ImageStreamTagInterface +type imageStreamTags struct { + *gentype.ClientWithList[*imagev1.ImageStreamTag, *imagev1.ImageStreamTagList] +} + +// newImageStreamTags returns a ImageStreamTags +func newImageStreamTags(c *ImageV1Client, namespace string) *imageStreamTags { + return &imageStreamTags{ + gentype.NewClientWithList[*imagev1.ImageStreamTag, *imagev1.ImageStreamTagList]( + "imagestreamtags", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageStreamTag { return &imagev1.ImageStreamTag{} }, + func() *imagev1.ImageStreamTagList { return &imagev1.ImageStreamTagList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go new file mode 100644 index 0000000000000..1d69b558d48fc --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go @@ -0,0 +1,47 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// ImageTagsGetter has a method to return a ImageTagInterface. +// A group's client should implement this interface. +type ImageTagsGetter interface { + ImageTags(namespace string) ImageTagInterface +} + +// ImageTagInterface has methods to work with ImageTag resources. 
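ImageStreamTag (above) and ImageTag (below) are virtual resources addressed by a combined name; by OpenShift convention that name is the stream and the tag joined with a colon. A hedged sketch of resolving one tag:

package example

import (
    "context"

    imagev1 "github.com/openshift/api/image/v1"
    imageclient "github.com/openshift/client-go/image/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// getStreamTag resolves a single ImageStreamTag; the resource name is the
// stream and tag joined with a colon, e.g. "myapp:latest".
func getStreamTag(ctx context.Context, cs imageclient.Interface, ns, stream, tag string) (*imagev1.ImageStreamTag, error) {
    return cs.ImageV1().ImageStreamTags(ns).Get(ctx, stream+":"+tag, metav1.GetOptions{})
}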
+type ImageTagInterface interface { + Create(ctx context.Context, imageTag *imagev1.ImageTag, opts metav1.CreateOptions) (*imagev1.ImageTag, error) + Update(ctx context.Context, imageTag *imagev1.ImageTag, opts metav1.UpdateOptions) (*imagev1.ImageTag, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*imagev1.ImageTag, error) + List(ctx context.Context, opts metav1.ListOptions) (*imagev1.ImageTagList, error) + ImageTagExpansion +} + +// imageTags implements ImageTagInterface +type imageTags struct { + *gentype.ClientWithList[*imagev1.ImageTag, *imagev1.ImageTagList] +} + +// newImageTags returns a ImageTags +func newImageTags(c *ImageV1Client, namespace string) *imageTags { + return &imageTags{ + gentype.NewClientWithList[*imagev1.ImageTag, *imagev1.ImageTagList]( + "imagetags", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageTag { return &imagev1.ImageTag{} }, + func() *imagev1.ImageTagList { return &imagev1.ImageTagList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go new file mode 100644 index 0000000000000..ecb2d2f95814e --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + image "github.com/openshift/client-go/image/informers/externalversions/image" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. 
+ f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.Shutdown() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+	Image() image.Interface
+}
+
+func (f *sharedInformerFactory) Image() image.Interface {
+	return image.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..2f1bda6d8e79e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go
@@ -0,0 +1,48 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/image/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=image.openshift.io, Version=v1
+	case v1.SchemeGroupVersion.WithResource("images"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().Images().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("imagestreams"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().ImageStreams().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go
new file mode 100644
index 0000000000000..092550ed3dab4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go
@@ -0,0 +1,30 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package image
+
+import (
+	v1 "github.com/openshift/client-go/image/informers/externalversions/image/v1"
+	internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+	// V1 provides access to shared informers for resources in V1.
+	V1() v1.Interface
+}
+
+type group struct {
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
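+//
+// For illustration, a hypothetical caller would normally reach this accessor
+// through the factory, e.g. factory.Image().V1().ImageStreams().Informer();
+// the namespace and tweakListOptions stored on the group are simply forwarded
+// to the version below.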
+func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go new file mode 100644 index 0000000000000..63cf355ffa1bb --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiimagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + imagev1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() imagev1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
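+//
+// A minimal sketch of standalone (non-factory) use, assuming a configured
+// clientset named client and a stop channel named stopCh (both hypothetical):
+//
+//	informer := NewFilteredImageInformer(client, 30*time.Second, cache.Indexers{}, nil)
+//	go informer.Run(stopCh)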
+func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().Watch(context.TODO(), options) + }, + }, + &apiimagev1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiimagev1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() imagev1.ImageLister { + return imagev1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go new file mode 100644 index 0000000000000..5c2c8295acc52 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiimagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + imagev1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageStreamInformer provides access to a shared informer and lister for +// ImageStreams. +type ImageStreamInformer interface { + Informer() cache.SharedIndexInformer + Lister() imagev1.ImageStreamLister +} + +type imageStreamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
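+//
+// Unlike the cluster-scoped Image informer above, ImageStreams are
+// namespaced: the namespace argument scopes the underlying List and Watch
+// calls, and metav1.NamespaceAll can be passed to watch all namespaces.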
+func NewFilteredImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).Watch(context.TODO(), options) + }, + }, + &apiimagev1.ImageStream{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageStreamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageStreamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiimagev1.ImageStream{}, f.defaultInformer) +} + +func (f *imageStreamInformer) Lister() imagev1.ImageStreamLister { + return imagev1.NewImageStreamLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go new file mode 100644 index 0000000000000..fd35c4df1abb7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Images returns a ImageInformer. + Images() ImageInformer + // ImageStreams returns a ImageStreamInformer. + ImageStreams() ImageStreamInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageStreams returns a ImageStreamInformer. +func (v *version) ImageStreams() ImageStreamInformer { + return &imageStreamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..c35dcbfa44bb9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/openshift/client-go/image/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go
new file mode 100644
index 0000000000000..308b6db702d9f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go
@@ -0,0 +1,31 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// ImageListerExpansion allows custom methods to be added to
+// ImageLister.
+type ImageListerExpansion interface{}
+
+// ImageStreamListerExpansion allows custom methods to be added to
+// ImageStreamLister.
+type ImageStreamListerExpansion interface{}
+
+// ImageStreamNamespaceListerExpansion allows custom methods to be added to
+// ImageStreamNamespaceLister.
+type ImageStreamNamespaceListerExpansion interface{}
+
+// ImageStreamTagListerExpansion allows custom methods to be added to
+// ImageStreamTagLister.
+type ImageStreamTagListerExpansion interface{}
+
+// ImageStreamTagNamespaceListerExpansion allows custom methods to be added to
+// ImageStreamTagNamespaceLister.
+type ImageStreamTagNamespaceListerExpansion interface{}
+
+// ImageTagListerExpansion allows custom methods to be added to
+// ImageTagLister.
+type ImageTagListerExpansion interface{}
+
+// ImageTagNamespaceListerExpansion allows custom methods to be added to
+// ImageTagNamespaceLister.
+type ImageTagNamespaceListerExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go
new file mode 100644
index 0000000000000..9ac1044d96d97
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go
@@ -0,0 +1,32 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	imagev1 "github.com/openshift/api/image/v1"
+	labels "k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// ImageLister helps list Images.
+// All objects returned here must be treated as read-only.
+type ImageLister interface {
+	// List lists all Images in the indexer.
+	// Objects returned here must be treated as read-only.
+	List(selector labels.Selector) (ret []*imagev1.Image, err error)
+	// Get retrieves the Image from the index for a given name.
+	// Objects returned here must be treated as read-only.
+	Get(name string) (*imagev1.Image, error)
+	ImageListerExpansion
+}
+
+// imageLister implements the ImageLister interface.
+type imageLister struct {
+	listers.ResourceIndexer[*imagev1.Image]
+}
+
+// NewImageLister returns a new ImageLister.
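+//
+// A minimal usage sketch, assuming a shared informer whose cache has already
+// synced (informer here is hypothetical caller state):
+//
+//	lister := NewImageLister(informer.GetIndexer())
+//	images, err := lister.List(labels.Everything())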
+func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{listers.New[*imagev1.Image](indexer, imagev1.Resource("image"))} +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go new file mode 100644 index 0000000000000..e9f1474501b80 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageStreamLister helps list ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamLister interface { + // List lists all ImageStreams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*imagev1.ImageStream, err error) + // ImageStreams returns an object that can list and get ImageStreams. + ImageStreams(namespace string) ImageStreamNamespaceLister + ImageStreamListerExpansion +} + +// imageStreamLister implements the ImageStreamLister interface. +type imageStreamLister struct { + listers.ResourceIndexer[*imagev1.ImageStream] +} + +// NewImageStreamLister returns a new ImageStreamLister. +func NewImageStreamLister(indexer cache.Indexer) ImageStreamLister { + return &imageStreamLister{listers.New[*imagev1.ImageStream](indexer, imagev1.Resource("imagestream"))} +} + +// ImageStreams returns an object that can list and get ImageStreams. +func (s *imageStreamLister) ImageStreams(namespace string) ImageStreamNamespaceLister { + return imageStreamNamespaceLister{listers.NewNamespaced[*imagev1.ImageStream](s.ResourceIndexer, namespace)} +} + +// ImageStreamNamespaceLister helps list and get ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamNamespaceLister interface { + // List lists all ImageStreams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*imagev1.ImageStream, err error) + // Get retrieves the ImageStream from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*imagev1.ImageStream, error) + ImageStreamNamespaceListerExpansion +} + +// imageStreamNamespaceLister implements the ImageStreamNamespaceLister +// interface. +type imageStreamNamespaceLister struct { + listers.ResourceIndexer[*imagev1.ImageStream] +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go new file mode 100644 index 0000000000000..5e77428504284 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageStreamTagLister helps list ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagLister interface { + // List lists all ImageStreamTags in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*imagev1.ImageStreamTag, err error) + // ImageStreamTags returns an object that can list and get ImageStreamTags. + ImageStreamTags(namespace string) ImageStreamTagNamespaceLister + ImageStreamTagListerExpansion +} + +// imageStreamTagLister implements the ImageStreamTagLister interface. +type imageStreamTagLister struct { + listers.ResourceIndexer[*imagev1.ImageStreamTag] +} + +// NewImageStreamTagLister returns a new ImageStreamTagLister. +func NewImageStreamTagLister(indexer cache.Indexer) ImageStreamTagLister { + return &imageStreamTagLister{listers.New[*imagev1.ImageStreamTag](indexer, imagev1.Resource("imagestreamtag"))} +} + +// ImageStreamTags returns an object that can list and get ImageStreamTags. +func (s *imageStreamTagLister) ImageStreamTags(namespace string) ImageStreamTagNamespaceLister { + return imageStreamTagNamespaceLister{listers.NewNamespaced[*imagev1.ImageStreamTag](s.ResourceIndexer, namespace)} +} + +// ImageStreamTagNamespaceLister helps list and get ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagNamespaceLister interface { + // List lists all ImageStreamTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*imagev1.ImageStreamTag, err error) + // Get retrieves the ImageStreamTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*imagev1.ImageStreamTag, error) + ImageStreamTagNamespaceListerExpansion +} + +// imageStreamTagNamespaceLister implements the ImageStreamTagNamespaceLister +// interface. +type imageStreamTagNamespaceLister struct { + listers.ResourceIndexer[*imagev1.ImageStreamTag] +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go new file mode 100644 index 0000000000000..511e2aae3c757 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + imagev1 "github.com/openshift/api/image/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImageTagLister helps list ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagLister interface { + // List lists all ImageTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*imagev1.ImageTag, err error) + // ImageTags returns an object that can list and get ImageTags. + ImageTags(namespace string) ImageTagNamespaceLister + ImageTagListerExpansion +} + +// imageTagLister implements the ImageTagLister interface. +type imageTagLister struct { + listers.ResourceIndexer[*imagev1.ImageTag] +} + +// NewImageTagLister returns a new ImageTagLister. +func NewImageTagLister(indexer cache.Indexer) ImageTagLister { + return &imageTagLister{listers.New[*imagev1.ImageTag](indexer, imagev1.Resource("imagetag"))} +} + +// ImageTags returns an object that can list and get ImageTags. +func (s *imageTagLister) ImageTags(namespace string) ImageTagNamespaceLister { + return imageTagNamespaceLister{listers.NewNamespaced[*imagev1.ImageTag](s.ResourceIndexer, namespace)} +} + +// ImageTagNamespaceLister helps list and get ImageTags. +// All objects returned here must be treated as read-only. 
+type ImageTagNamespaceLister interface { + // List lists all ImageTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*imagev1.ImageTag, err error) + // Get retrieves the ImageTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*imagev1.ImageTag, error) + ImageTagNamespaceListerExpansion +} + +// imageTagNamespaceLister implements the ImageTagNamespaceLister +// interface. +type imageTagNamespaceLister struct { + listers.ResourceIndexer[*imagev1.ImageTag] +} diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..e7ad2b8a2053c --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/internal/internal.go @@ -0,0 +1,436 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.network.v1.ClusterNetwork + map: + fields: + - name: apiVersion + type: + scalar: string + - name: clusterNetworks + type: + list: + elementType: + namedType: com.github.openshift.api.network.v1.ClusterNetworkEntry + elementRelationship: atomic + - name: hostsubnetlength + type: + scalar: numeric + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: mtu + type: + scalar: numeric + - name: network + type: + scalar: string + - name: pluginName + type: + scalar: string + - name: serviceNetwork + type: + scalar: string + default: "" + - name: vxlanPort + type: + scalar: numeric +- name: com.github.openshift.api.network.v1.ClusterNetworkEntry + map: + fields: + - name: CIDR + type: + scalar: string + default: "" + - name: hostSubnetLength + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.network.v1.EgressNetworkPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.network.v1.EgressNetworkPolicySpec + default: {} +- name: com.github.openshift.api.network.v1.EgressNetworkPolicyPeer + map: + fields: + - name: cidrSelector + type: + scalar: string + - name: dnsName + type: + scalar: string +- name: com.github.openshift.api.network.v1.EgressNetworkPolicyRule + map: + fields: + - name: to + type: + namedType: com.github.openshift.api.network.v1.EgressNetworkPolicyPeer + default: {} + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.network.v1.EgressNetworkPolicySpec + map: + fields: + - name: egress + type: + list: + elementType: + namedType: com.github.openshift.api.network.v1.EgressNetworkPolicyRule + elementRelationship: atomic +- name: com.github.openshift.api.network.v1.HostSubnet + map: + fields: + - name: apiVersion + type: + 
scalar: string + - name: egressCIDRs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: egressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: host + type: + scalar: string + default: "" + - name: hostIP + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: subnet + type: + scalar: string + default: "" +- name: com.github.openshift.api.network.v1.NetNamespace + map: + fields: + - name: apiVersion + type: + scalar: string + - name: egressIPs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: netid + type: + scalar: numeric + default: 0 + - name: netname + type: + scalar: string + default: "" +- name: com.github.openshift.api.network.v1alpha1.DNSNameResolver + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.network.v1alpha1.DNSNameResolverSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.network.v1alpha1.DNSNameResolverStatus + default: {} +- name: com.github.openshift.api.network.v1alpha1.DNSNameResolverResolvedAddress + map: + fields: + - name: ip + type: + scalar: string + default: "" + - name: lastLookupTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: ttlSeconds + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.network.v1alpha1.DNSNameResolverResolvedName + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: dnsName + type: + scalar: string + default: "" + - name: resolutionFailures + type: + scalar: numeric + - name: resolvedAddresses + type: + list: + elementType: + namedType: com.github.openshift.api.network.v1alpha1.DNSNameResolverResolvedAddress + elementRelationship: associative + keys: + - ip +- name: com.github.openshift.api.network.v1alpha1.DNSNameResolverSpec + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.network.v1alpha1.DNSNameResolverStatus + map: + fields: + - name: resolvedNames + type: + list: + elementType: + namedType: com.github.openshift.api.network.v1alpha1.DNSNameResolverResolvedName + elementRelationship: associative + keys: + - dnsName +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + default: "" + - name: observedGeneration + type: + scalar: numeric + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: 
io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/clusternetwork.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/clusternetwork.go new file mode 100644 index 0000000000000..c7df12fd8d82d --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/clusternetwork.go @@ -0,0 +1,296 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + networkv1 "github.com/openshift/api/network/v1" + internal "github.com/openshift/client-go/network/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterNetworkApplyConfiguration represents a declarative configuration of the ClusterNetwork type for use +// with apply. 
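+//
+// A minimal sketch of builder-style use, assuming a configured network
+// clientset named client; the Apply call, ctx, and the field-manager name are
+// illustrative assumptions, not part of this file:
+//
+//	cn := ClusterNetwork("default").
+//		WithPluginName("redhat/openshift-ovs-networkpolicy").
+//		WithMTU(1450)
+//	_, err := client.NetworkV1().ClusterNetworks().Apply(ctx, cn, apismetav1.ApplyOptions{FieldManager: "my-controller"})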
+type ClusterNetworkApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Network                              *string                                 `json:"network,omitempty"`
+	HostSubnetLength                     *uint32                                 `json:"hostsubnetlength,omitempty"`
+	ServiceNetwork                       *string                                 `json:"serviceNetwork,omitempty"`
+	PluginName                           *string                                 `json:"pluginName,omitempty"`
+	ClusterNetworks                      []ClusterNetworkEntryApplyConfiguration `json:"clusterNetworks,omitempty"`
+	VXLANPort                            *uint32                                 `json:"vxlanPort,omitempty"`
+	MTU                                  *uint32                                 `json:"mtu,omitempty"`
+}
+
+// ClusterNetwork constructs a declarative configuration of the ClusterNetwork type for use with
+// apply.
+func ClusterNetwork(name string) *ClusterNetworkApplyConfiguration {
+	b := &ClusterNetworkApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("ClusterNetwork")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b
+}
+
+// ExtractClusterNetwork extracts the applied configuration owned by fieldManager from
+// clusterNetwork. If no managedFields are found in clusterNetwork for fieldManager, a
+// ClusterNetworkApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterNetwork must be an unmodified ClusterNetwork API object that was retrieved from the Kubernetes API.
+// ExtractClusterNetwork provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterNetwork(clusterNetwork *networkv1.ClusterNetwork, fieldManager string) (*ClusterNetworkApplyConfiguration, error) {
+	return extractClusterNetwork(clusterNetwork, fieldManager, "")
+}
+
+// ExtractClusterNetworkStatus is the same as ExtractClusterNetwork except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterNetworkStatus(clusterNetwork *networkv1.ClusterNetwork, fieldManager string) (*ClusterNetworkApplyConfiguration, error) {
+	return extractClusterNetwork(clusterNetwork, fieldManager, "status")
+}
+
+func extractClusterNetwork(clusterNetwork *networkv1.ClusterNetwork, fieldManager string, subresource string) (*ClusterNetworkApplyConfiguration, error) {
+	b := &ClusterNetworkApplyConfiguration{}
+	err := managedfields.ExtractInto(clusterNetwork, internal.Parser().Type("com.github.openshift.api.network.v1.ClusterNetwork"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(clusterNetwork.Name)
+
+	b.WithKind("ClusterNetwork")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithKind(value string) *ClusterNetworkApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterNetworkApplyConfiguration) WithAPIVersion(value string) *ClusterNetworkApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterNetworkApplyConfiguration) WithName(value string) *ClusterNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterNetworkApplyConfiguration) WithGenerateName(value string) *ClusterNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterNetworkApplyConfiguration) WithNamespace(value string) *ClusterNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterNetworkApplyConfiguration) WithUID(value types.UID) *ClusterNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterNetworkApplyConfiguration) WithResourceVersion(value string) *ClusterNetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ClusterNetworkApplyConfiguration) WithGeneration(value int64) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ClusterNetworkApplyConfiguration) WithLabels(entries map[string]string) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ClusterNetworkApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterNetworkApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterNetworkApplyConfiguration) WithFinalizers(values ...string) *ClusterNetworkApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *ClusterNetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithNetwork sets the Network field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Network field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithNetwork(value string) *ClusterNetworkApplyConfiguration {
+	b.Network = &value
+	return b
+}
+
+// WithHostSubnetLength sets the HostSubnetLength field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostSubnetLength field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithHostSubnetLength(value uint32) *ClusterNetworkApplyConfiguration {
+	b.HostSubnetLength = &value
+	return b
+}
+
+// WithServiceNetwork sets the ServiceNetwork field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceNetwork field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithServiceNetwork(value string) *ClusterNetworkApplyConfiguration {
+	b.ServiceNetwork = &value
+	return b
+}
+
+// WithPluginName sets the PluginName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PluginName field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithPluginName(value string) *ClusterNetworkApplyConfiguration {
+	b.PluginName = &value
+	return b
+}
+
+// WithClusterNetworks adds the given value to the ClusterNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClusterNetworks field.
+func (b *ClusterNetworkApplyConfiguration) WithClusterNetworks(values ...*ClusterNetworkEntryApplyConfiguration) *ClusterNetworkApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithClusterNetworks")
+		}
+		b.ClusterNetworks = append(b.ClusterNetworks, *values[i])
+	}
+	return b
+}
+
+// WithVXLANPort sets the VXLANPort field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the VXLANPort field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithVXLANPort(value uint32) *ClusterNetworkApplyConfiguration {
+	b.VXLANPort = &value
+	return b
+}
+
+// WithMTU sets the MTU field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MTU field is set to the value of the last call.
+func (b *ClusterNetworkApplyConfiguration) WithMTU(value uint32) *ClusterNetworkApplyConfiguration {
+	b.MTU = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ClusterNetworkApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/clusternetworkentry.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/clusternetworkentry.go
new file mode 100644
index 0000000000000..066edaaf2e695
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/clusternetworkentry.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ClusterNetworkEntryApplyConfiguration represents a declarative configuration of the ClusterNetworkEntry type for use
+// with apply.
+type ClusterNetworkEntryApplyConfiguration struct {
+	CIDR             *string `json:"CIDR,omitempty"`
+	HostSubnetLength *uint32 `json:"hostSubnetLength,omitempty"`
+}
+
+// ClusterNetworkEntryApplyConfiguration constructs a declarative configuration of the ClusterNetworkEntry type for use with
+// apply.
+func ClusterNetworkEntry() *ClusterNetworkEntryApplyConfiguration {
+	return &ClusterNetworkEntryApplyConfiguration{}
+}
+
+// WithCIDR sets the CIDR field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CIDR field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithCIDR(value string) *ClusterNetworkEntryApplyConfiguration {
+	b.CIDR = &value
+	return b
+}
+
+// WithHostSubnetLength sets the HostSubnetLength field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostSubnetLength field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithHostSubnetLength(value uint32) *ClusterNetworkEntryApplyConfiguration {
+	b.HostSubnetLength = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicy.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicy.go
new file mode 100644
index 0000000000000..a4d76be63da8f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicy.go
@@ -0,0 +1,239 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	networkv1 "github.com/openshift/api/network/v1"
+	internal "github.com/openshift/client-go/network/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// EgressNetworkPolicyApplyConfiguration represents a declarative configuration of the EgressNetworkPolicy type for use
+// with apply.
+type EgressNetworkPolicyApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *EgressNetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// EgressNetworkPolicy constructs a declarative configuration of the EgressNetworkPolicy type for use with
+// apply.
+func EgressNetworkPolicy(name, namespace string) *EgressNetworkPolicyApplyConfiguration {
+	b := &EgressNetworkPolicyApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("EgressNetworkPolicy")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b
+}
+
+// ExtractEgressNetworkPolicy extracts the applied configuration owned by fieldManager from
+// egressNetworkPolicy. If no managedFields are found in egressNetworkPolicy for fieldManager, an
+// EgressNetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// egressNetworkPolicy must be an unmodified EgressNetworkPolicy API object that was retrieved from the Kubernetes API.
+// ExtractEgressNetworkPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
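+//
+// A minimal sketch of that workflow, assuming enp was fetched from the API
+// server and client is a configured network clientset (both hypothetical):
+//
+//	ac, err := ExtractEgressNetworkPolicy(enp, "my-controller")
+//	if err != nil {
+//		return err
+//	}
+//	ac.WithLabels(map[string]string{"team": "network"})
+//	_, err = client.NetworkV1().EgressNetworkPolicies(enp.Namespace).Apply(ctx, ac, apismetav1.ApplyOptions{FieldManager: "my-controller"})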
+func ExtractEgressNetworkPolicy(egressNetworkPolicy *networkv1.EgressNetworkPolicy, fieldManager string) (*EgressNetworkPolicyApplyConfiguration, error) { + return extractEgressNetworkPolicy(egressNetworkPolicy, fieldManager, "") +} + +// ExtractEgressNetworkPolicyStatus is the same as ExtractEgressNetworkPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractEgressNetworkPolicyStatus(egressNetworkPolicy *networkv1.EgressNetworkPolicy, fieldManager string) (*EgressNetworkPolicyApplyConfiguration, error) { + return extractEgressNetworkPolicy(egressNetworkPolicy, fieldManager, "status") +} + +func extractEgressNetworkPolicy(egressNetworkPolicy *networkv1.EgressNetworkPolicy, fieldManager string, subresource string) (*EgressNetworkPolicyApplyConfiguration, error) { + b := &EgressNetworkPolicyApplyConfiguration{} + err := managedfields.ExtractInto(egressNetworkPolicy, internal.Parser().Type("com.github.openshift.api.network.v1.EgressNetworkPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(egressNetworkPolicy.Name) + b.WithNamespace(egressNetworkPolicy.Namespace) + + b.WithKind("EgressNetworkPolicy") + b.WithAPIVersion("network.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithKind(value string) *EgressNetworkPolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithAPIVersion(value string) *EgressNetworkPolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithName(value string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithGenerateName(value string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *EgressNetworkPolicyApplyConfiguration) WithNamespace(value string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithUID(value types.UID) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithResourceVersion(value string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithGeneration(value int64) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *EgressNetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *EgressNetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *EgressNetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *EgressNetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *EgressNetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *EgressNetworkPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *EgressNetworkPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *EgressNetworkPolicyApplyConfiguration) WithSpec(value *EgressNetworkPolicySpecApplyConfiguration) *EgressNetworkPolicyApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EgressNetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicypeer.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicypeer.go new file mode 100644 index 0000000000000..e7b0e4640cb9b --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicypeer.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// EgressNetworkPolicyPeerApplyConfiguration represents a declarative configuration of the EgressNetworkPolicyPeer type for use +// with apply. +type EgressNetworkPolicyPeerApplyConfiguration struct { + CIDRSelector *string `json:"cidrSelector,omitempty"` + DNSName *string `json:"dnsName,omitempty"` +} + +// EgressNetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the EgressNetworkPolicyPeer type for use with +// apply. +func EgressNetworkPolicyPeer() *EgressNetworkPolicyPeerApplyConfiguration { + return &EgressNetworkPolicyPeerApplyConfiguration{} +} + +// WithCIDRSelector sets the CIDRSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CIDRSelector field is set to the value of the last call. +func (b *EgressNetworkPolicyPeerApplyConfiguration) WithCIDRSelector(value string) *EgressNetworkPolicyPeerApplyConfiguration { + b.CIDRSelector = &value + return b +} + +// WithDNSName sets the DNSName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSName field is set to the value of the last call. 
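Review note: a minimal sketch of the extract/modify-in-place/apply workflow that the ExtractEgressNetworkPolicy comment above describes. It assumes the typed clientset vendored with this module (github.com/openshift/client-go/network/clientset/versioned) exposes the standard generated Get and Apply methods; the object name "default", namespace argument, and field manager "my-controller" are illustrative.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	networkv1ac "github.com/openshift/client-go/network/applyconfigurations/network/v1"
	networkclient "github.com/openshift/client-go/network/clientset/versioned"
)

func relabel(ctx context.Context, c networkclient.Interface, ns string) error {
	// Read the live object, then extract only the fields our manager owns.
	live, err := c.NetworkV1().EgressNetworkPolicies(ns).Get(ctx, "default", metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := networkv1ac.ExtractEgressNetworkPolicy(live, "my-controller")
	if err != nil {
		return err
	}
	// Modify the extracted configuration in place and apply it back;
	// server-side apply leaves fields owned by other managers untouched.
	ac.WithLabels(map[string]string{"audit": "reviewed"})
	_, err = c.NetworkV1().EgressNetworkPolicies(ns).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "my-controller"})
	return err
}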
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicypeer.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicypeer.go
new file mode 100644
index 0000000000000..e7b0e4640cb9b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicypeer.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// EgressNetworkPolicyPeerApplyConfiguration represents a declarative configuration of the EgressNetworkPolicyPeer type for use
+// with apply.
+type EgressNetworkPolicyPeerApplyConfiguration struct {
+	CIDRSelector *string `json:"cidrSelector,omitempty"`
+	DNSName      *string `json:"dnsName,omitempty"`
+}
+
+// EgressNetworkPolicyPeer constructs a declarative configuration of the EgressNetworkPolicyPeer type for use with
+// apply.
+func EgressNetworkPolicyPeer() *EgressNetworkPolicyPeerApplyConfiguration {
+	return &EgressNetworkPolicyPeerApplyConfiguration{}
+}
+
+// WithCIDRSelector sets the CIDRSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CIDRSelector field is set to the value of the last call.
+func (b *EgressNetworkPolicyPeerApplyConfiguration) WithCIDRSelector(value string) *EgressNetworkPolicyPeerApplyConfiguration {
+	b.CIDRSelector = &value
+	return b
+}
+
+// WithDNSName sets the DNSName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DNSName field is set to the value of the last call.
+func (b *EgressNetworkPolicyPeerApplyConfiguration) WithDNSName(value string) *EgressNetworkPolicyPeerApplyConfiguration {
+	b.DNSName = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicyrule.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicyrule.go
new file mode 100644
index 0000000000000..1806975842342
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicyrule.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	networkv1 "github.com/openshift/api/network/v1"
+)
+
+// EgressNetworkPolicyRuleApplyConfiguration represents a declarative configuration of the EgressNetworkPolicyRule type for use
+// with apply.
+type EgressNetworkPolicyRuleApplyConfiguration struct {
+	Type *networkv1.EgressNetworkPolicyRuleType     `json:"type,omitempty"`
+	To   *EgressNetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
+}
+
+// EgressNetworkPolicyRule constructs a declarative configuration of the EgressNetworkPolicyRule type for use with
+// apply.
+func EgressNetworkPolicyRule() *EgressNetworkPolicyRuleApplyConfiguration {
+	return &EgressNetworkPolicyRuleApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *EgressNetworkPolicyRuleApplyConfiguration) WithType(value networkv1.EgressNetworkPolicyRuleType) *EgressNetworkPolicyRuleApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithTo sets the To field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the To field is set to the value of the last call.
+func (b *EgressNetworkPolicyRuleApplyConfiguration) WithTo(value *EgressNetworkPolicyPeerApplyConfiguration) *EgressNetworkPolicyRuleApplyConfiguration {
+	b.To = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicyspec.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicyspec.go
new file mode 100644
index 0000000000000..09c685d2c9fc1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/egressnetworkpolicyspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// EgressNetworkPolicySpecApplyConfiguration represents a declarative configuration of the EgressNetworkPolicySpec type for use
+// with apply.
+type EgressNetworkPolicySpecApplyConfiguration struct {
+	Egress []EgressNetworkPolicyRuleApplyConfiguration `json:"egress,omitempty"`
+}
+
+// EgressNetworkPolicySpec constructs a declarative configuration of the EgressNetworkPolicySpec type for use with
+// apply.
+func EgressNetworkPolicySpec() *EgressNetworkPolicySpecApplyConfiguration {
+	return &EgressNetworkPolicySpecApplyConfiguration{}
+}
+
+// WithEgress adds the given value to the Egress field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Egress field.
+func (b *EgressNetworkPolicySpecApplyConfiguration) WithEgress(values ...*EgressNetworkPolicyRuleApplyConfiguration) *EgressNetworkPolicySpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithEgress")
+		}
+		b.Egress = append(b.Egress, *values[i])
+	}
+	return b
+}
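Review note: the peer, rule, and spec builders above compose into a complete policy. A short sketch under stated assumptions: the Allow/Deny rule-type constants come from github.com/openshift/api/network/v1, and the policy name, namespace, DNS name, and CIDR are illustrative.

package main

import (
	networkv1 "github.com/openshift/api/network/v1"
	networkv1ac "github.com/openshift/client-go/network/applyconfigurations/network/v1"
)

// allowOneDenyRest builds an EgressNetworkPolicy apply configuration that
// allows traffic to a single DNS name and denies all other egress traffic.
func allowOneDenyRest(ns string) *networkv1ac.EgressNetworkPolicyApplyConfiguration {
	return networkv1ac.EgressNetworkPolicy("default", ns).
		WithSpec(networkv1ac.EgressNetworkPolicySpec().
			WithEgress(
				networkv1ac.EgressNetworkPolicyRule().
					WithType(networkv1.EgressNetworkPolicyRuleAllow).
					WithTo(networkv1ac.EgressNetworkPolicyPeer().WithDNSName("example.com")),
				networkv1ac.EgressNetworkPolicyRule().
					WithType(networkv1.EgressNetworkPolicyRuleDeny).
					WithTo(networkv1ac.EgressNetworkPolicyPeer().WithCIDRSelector("0.0.0.0/0")),
			))
}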
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/hostsubnet.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/hostsubnet.go
new file mode 100644
index 0000000000000..713fc7c634bde
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/hostsubnet.go
@@ -0,0 +1,277 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	networkv1 "github.com/openshift/api/network/v1"
+	internal "github.com/openshift/client-go/network/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// HostSubnetApplyConfiguration represents a declarative configuration of the HostSubnet type for use
+// with apply.
+type HostSubnetApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Host        *string                          `json:"host,omitempty"`
+	HostIP      *string                          `json:"hostIP,omitempty"`
+	Subnet      *string                          `json:"subnet,omitempty"`
+	EgressIPs   []networkv1.HostSubnetEgressIP   `json:"egressIPs,omitempty"`
+	EgressCIDRs []networkv1.HostSubnetEgressCIDR `json:"egressCIDRs,omitempty"`
+}
+
+// HostSubnet constructs a declarative configuration of the HostSubnet type for use with
+// apply.
+func HostSubnet(name string) *HostSubnetApplyConfiguration {
+	b := &HostSubnetApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("HostSubnet")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b
+}
+
+// ExtractHostSubnet extracts the applied configuration owned by fieldManager from
+// hostSubnet. If no managedFields are found in hostSubnet for fieldManager, a
+// HostSubnetApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// hostSubnet must be an unmodified HostSubnet API object that was retrieved from the Kubernetes API.
+// ExtractHostSubnet provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractHostSubnet(hostSubnet *networkv1.HostSubnet, fieldManager string) (*HostSubnetApplyConfiguration, error) {
+	return extractHostSubnet(hostSubnet, fieldManager, "")
+}
+
+// ExtractHostSubnetStatus is the same as ExtractHostSubnet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractHostSubnetStatus(hostSubnet *networkv1.HostSubnet, fieldManager string) (*HostSubnetApplyConfiguration, error) {
+	return extractHostSubnet(hostSubnet, fieldManager, "status")
+}
+
+func extractHostSubnet(hostSubnet *networkv1.HostSubnet, fieldManager string, subresource string) (*HostSubnetApplyConfiguration, error) {
+	b := &HostSubnetApplyConfiguration{}
+	err := managedfields.ExtractInto(hostSubnet, internal.Parser().Type("com.github.openshift.api.network.v1.HostSubnet"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(hostSubnet.Name)
+
+	b.WithKind("HostSubnet")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithKind(value string) *HostSubnetApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithAPIVersion(value string) *HostSubnetApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithName(value string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithGenerateName(value string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.GenerateName = &value
+	return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithNamespace(value string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Namespace = &value
+	return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithUID(value types.UID) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithResourceVersion(value string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithGeneration(value int64) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *HostSubnetApplyConfiguration) WithLabels(entries map[string]string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *HostSubnetApplyConfiguration) WithAnnotations(entries map[string]string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *HostSubnetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *HostSubnetApplyConfiguration) WithFinalizers(values ...string) *HostSubnetApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *HostSubnetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithHost sets the Host field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Host field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithHost(value string) *HostSubnetApplyConfiguration {
+	b.Host = &value
+	return b
+}
+
+// WithHostIP sets the HostIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostIP field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithHostIP(value string) *HostSubnetApplyConfiguration {
+	b.HostIP = &value
+	return b
+}
+
+// WithSubnet sets the Subnet field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Subnet field is set to the value of the last call.
+func (b *HostSubnetApplyConfiguration) WithSubnet(value string) *HostSubnetApplyConfiguration {
+	b.Subnet = &value
+	return b
+}
+
+// WithEgressIPs adds the given value to the EgressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the EgressIPs field.
+func (b *HostSubnetApplyConfiguration) WithEgressIPs(values ...networkv1.HostSubnetEgressIP) *HostSubnetApplyConfiguration {
+	for i := range values {
+		b.EgressIPs = append(b.EgressIPs, values[i])
+	}
+	return b
+}
+
+// WithEgressCIDRs adds the given value to the EgressCIDRs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the EgressCIDRs field.
+func (b *HostSubnetApplyConfiguration) WithEgressCIDRs(values ...networkv1.HostSubnetEgressCIDR) *HostSubnetApplyConfiguration {
+	for i := range values {
+		b.EgressCIDRs = append(b.EgressCIDRs, values[i])
+	}
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *HostSubnetApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
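Review note: HostSubnet is cluster-scoped, so its constructor above takes only a name. A brief sketch of the builder; HostSubnetEgressIP and HostSubnetEgressCIDR are string-based types from github.com/openshift/api/network/v1, and every address below is illustrative.

package main

import (
	networkv1 "github.com/openshift/api/network/v1"
	networkv1ac "github.com/openshift/client-go/network/applyconfigurations/network/v1"
)

// nodeSubnet declares a node's SDN subnet together with its egress addresses.
func nodeSubnet() *networkv1ac.HostSubnetApplyConfiguration {
	return networkv1ac.HostSubnet("node-1").
		WithHost("node-1").
		WithHostIP("10.0.0.10").
		WithSubnet("10.128.0.0/23").
		WithEgressIPs(networkv1.HostSubnetEgressIP("10.0.0.100")).
		WithEgressCIDRs(networkv1.HostSubnetEgressCIDR("10.0.0.0/24"))
}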
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/netnamespace.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/netnamespace.go
new file mode 100644
index 0000000000000..c821c0ae5d2bc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1/netnamespace.go
@@ -0,0 +1,257 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	networkv1 "github.com/openshift/api/network/v1"
+	internal "github.com/openshift/client-go/network/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NetNamespaceApplyConfiguration represents a declarative configuration of the NetNamespace type for use
+// with apply.
+type NetNamespaceApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	NetName   *string                          `json:"netname,omitempty"`
+	NetID     *uint32                          `json:"netid,omitempty"`
+	EgressIPs []networkv1.NetNamespaceEgressIP `json:"egressIPs,omitempty"`
+}
+
+// NetNamespace constructs a declarative configuration of the NetNamespace type for use with
+// apply.
+func NetNamespace(name string) *NetNamespaceApplyConfiguration {
+	b := &NetNamespaceApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("NetNamespace")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b
+}
+
+// ExtractNetNamespace extracts the applied configuration owned by fieldManager from
+// netNamespace. If no managedFields are found in netNamespace for fieldManager, a
+// NetNamespaceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// netNamespace must be an unmodified NetNamespace API object that was retrieved from the Kubernetes API.
+// ExtractNetNamespace provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractNetNamespace(netNamespace *networkv1.NetNamespace, fieldManager string) (*NetNamespaceApplyConfiguration, error) {
+	return extractNetNamespace(netNamespace, fieldManager, "")
+}
+
+// ExtractNetNamespaceStatus is the same as ExtractNetNamespace except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNetNamespaceStatus(netNamespace *networkv1.NetNamespace, fieldManager string) (*NetNamespaceApplyConfiguration, error) {
+	return extractNetNamespace(netNamespace, fieldManager, "status")
+}
+
+func extractNetNamespace(netNamespace *networkv1.NetNamespace, fieldManager string, subresource string) (*NetNamespaceApplyConfiguration, error) {
+	b := &NetNamespaceApplyConfiguration{}
+	err := managedfields.ExtractInto(netNamespace, internal.Parser().Type("com.github.openshift.api.network.v1.NetNamespace"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(netNamespace.Name)
+
+	b.WithKind("NetNamespace")
+	b.WithAPIVersion("network.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithKind(value string) *NetNamespaceApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithAPIVersion(value string) *NetNamespaceApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithName(value string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithGenerateName(value string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.GenerateName = &value
+	return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithNamespace(value string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Namespace = &value
+	return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithUID(value types.UID) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithResourceVersion(value string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithGeneration(value int64) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *NetNamespaceApplyConfiguration) WithLabels(entries map[string]string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *NetNamespaceApplyConfiguration) WithAnnotations(entries map[string]string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *NetNamespaceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *NetNamespaceApplyConfiguration) WithFinalizers(values ...string) *NetNamespaceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *NetNamespaceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithNetName sets the NetName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetName field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithNetName(value string) *NetNamespaceApplyConfiguration {
+	b.NetName = &value
+	return b
+}
+
+// WithNetID sets the NetID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetID field is set to the value of the last call.
+func (b *NetNamespaceApplyConfiguration) WithNetID(value uint32) *NetNamespaceApplyConfiguration {
+	b.NetID = &value
+	return b
+}
+
+// WithEgressIPs adds the given value to the EgressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the EgressIPs field.
+func (b *NetNamespaceApplyConfiguration) WithEgressIPs(values ...networkv1.NetNamespaceEgressIP) *NetNamespaceApplyConfiguration {
+	for i := range values {
+		b.EgressIPs = append(b.EgressIPs, values[i])
+	}
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *NetNamespaceApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
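Review note: a minimal sketch of the NetNamespace builder above; NetNamespaceEgressIP is a string-based type from github.com/openshift/api/network/v1, and the namespace name, VNID, and egress IP are illustrative.

package main

import (
	networkv1 "github.com/openshift/api/network/v1"
	networkv1ac "github.com/openshift/client-go/network/applyconfigurations/network/v1"
)

// netns associates a namespace with a VNID and an egress IP.
func netns() *networkv1ac.NetNamespaceApplyConfiguration {
	return networkv1ac.NetNamespace("my-namespace").
		WithNetName("my-namespace").
		WithNetID(42).
		WithEgressIPs(networkv1.NetNamespaceEgressIP("10.0.0.101"))
}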
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolver.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolver.go
new file mode 100644
index 0000000000000..8d5efa487b389
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolver.go
@@ -0,0 +1,248 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	networkv1alpha1 "github.com/openshift/api/network/v1alpha1"
+	internal "github.com/openshift/client-go/network/applyconfigurations/internal"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// DNSNameResolverApplyConfiguration represents a declarative configuration of the DNSNameResolver type for use
+// with apply.
+type DNSNameResolverApplyConfiguration struct {
+	v1.TypeMetaApplyConfiguration    `json:",inline"`
+	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec   *DNSNameResolverSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status *DNSNameResolverStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// DNSNameResolver constructs a declarative configuration of the DNSNameResolver type for use with
+// apply.
+func DNSNameResolver(name, namespace string) *DNSNameResolverApplyConfiguration {
+	b := &DNSNameResolverApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("DNSNameResolver")
+	b.WithAPIVersion("network.openshift.io/v1alpha1")
+	return b
+}
+
+// ExtractDNSNameResolver extracts the applied configuration owned by fieldManager from
+// dNSNameResolver. If no managedFields are found in dNSNameResolver for fieldManager, a
+// DNSNameResolverApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// dNSNameResolver must be an unmodified DNSNameResolver API object that was retrieved from the Kubernetes API.
+// ExtractDNSNameResolver provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractDNSNameResolver(dNSNameResolver *networkv1alpha1.DNSNameResolver, fieldManager string) (*DNSNameResolverApplyConfiguration, error) {
+	return extractDNSNameResolver(dNSNameResolver, fieldManager, "")
+}
+
+// ExtractDNSNameResolverStatus is the same as ExtractDNSNameResolver except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDNSNameResolverStatus(dNSNameResolver *networkv1alpha1.DNSNameResolver, fieldManager string) (*DNSNameResolverApplyConfiguration, error) {
+	return extractDNSNameResolver(dNSNameResolver, fieldManager, "status")
+}
+
+func extractDNSNameResolver(dNSNameResolver *networkv1alpha1.DNSNameResolver, fieldManager string, subresource string) (*DNSNameResolverApplyConfiguration, error) {
+	b := &DNSNameResolverApplyConfiguration{}
+	err := managedfields.ExtractInto(dNSNameResolver, internal.Parser().Type("com.github.openshift.api.network.v1alpha1.DNSNameResolver"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(dNSNameResolver.Name)
+	b.WithNamespace(dNSNameResolver.Namespace)
+
+	b.WithKind("DNSNameResolver")
+	b.WithAPIVersion("network.openshift.io/v1alpha1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithKind(value string) *DNSNameResolverApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithAPIVersion(value string) *DNSNameResolverApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithName(value string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithGenerateName(value string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.GenerateName = &value
+	return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithNamespace(value string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Namespace = &value
+	return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithUID(value types.UID) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithResourceVersion(value string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithGeneration(value int64) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *DNSNameResolverApplyConfiguration) WithLabels(entries map[string]string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *DNSNameResolverApplyConfiguration) WithAnnotations(entries map[string]string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *DNSNameResolverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *DNSNameResolverApplyConfiguration) WithFinalizers(values ...string) *DNSNameResolverApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *DNSNameResolverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithSpec(value *DNSNameResolverSpecApplyConfiguration) *DNSNameResolverApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *DNSNameResolverApplyConfiguration) WithStatus(value *DNSNameResolverStatusApplyConfiguration) *DNSNameResolverApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *DNSNameResolverApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
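Review note: a short sketch composing the DNSNameResolver builder above with the spec builder defined further below. DNSName is a string-based type from github.com/openshift/api/network/v1alpha1; the object name and DNS name (including its trailing dot) are illustrative.

package main

import (
	networkv1alpha1 "github.com/openshift/api/network/v1alpha1"
	networkv1alpha1ac "github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1"
)

// resolver requests tracking of a single DNS name in the given namespace.
func resolver(ns string) *networkv1alpha1ac.DNSNameResolverApplyConfiguration {
	return networkv1alpha1ac.DNSNameResolver("www-example", ns).
		WithSpec(networkv1alpha1ac.DNSNameResolverSpec().
			WithName(networkv1alpha1.DNSName("www.example.com.")))
}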
+func DNSNameResolverResolvedAddress() *DNSNameResolverResolvedAddressApplyConfiguration {
+	return &DNSNameResolverResolvedAddressApplyConfiguration{}
+}
+
+// WithIP sets the IP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IP field is set to the value of the last call.
+func (b *DNSNameResolverResolvedAddressApplyConfiguration) WithIP(value string) *DNSNameResolverResolvedAddressApplyConfiguration {
+	b.IP = &value
+	return b
+}
+
+// WithTTLSeconds sets the TTLSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TTLSeconds field is set to the value of the last call.
+func (b *DNSNameResolverResolvedAddressApplyConfiguration) WithTTLSeconds(value int32) *DNSNameResolverResolvedAddressApplyConfiguration {
+	b.TTLSeconds = &value
+	return b
+}
+
+// WithLastLookupTime sets the LastLookupTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LastLookupTime field is set to the value of the last call.
+func (b *DNSNameResolverResolvedAddressApplyConfiguration) WithLastLookupTime(value v1.Time) *DNSNameResolverResolvedAddressApplyConfiguration {
+	b.LastLookupTime = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverresolvedname.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverresolvedname.go
new file mode 100644
index 0000000000000..15d3e7e20ec1d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverresolvedname.go
@@ -0,0 +1,65 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	networkv1alpha1 "github.com/openshift/api/network/v1alpha1"
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// DNSNameResolverResolvedNameApplyConfiguration represents a declarative configuration of the DNSNameResolverResolvedName type for use
+// with apply.
+type DNSNameResolverResolvedNameApplyConfiguration struct {
+	Conditions         []v1.ConditionApplyConfiguration                    `json:"conditions,omitempty"`
+	DNSName            *networkv1alpha1.DNSName                            `json:"dnsName,omitempty"`
+	ResolvedAddresses  []DNSNameResolverResolvedAddressApplyConfiguration `json:"resolvedAddresses,omitempty"`
+	ResolutionFailures *int32                                              `json:"resolutionFailures,omitempty"`
+}
+
+// DNSNameResolverResolvedNameApplyConfiguration constructs a declarative configuration of the DNSNameResolverResolvedName type for use with
+// apply.
+func DNSNameResolverResolvedName() *DNSNameResolverResolvedNameApplyConfiguration {
+	return &DNSNameResolverResolvedNameApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *DNSNameResolverResolvedNameApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *DNSNameResolverResolvedNameApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithDNSName sets the DNSName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DNSName field is set to the value of the last call.
+func (b *DNSNameResolverResolvedNameApplyConfiguration) WithDNSName(value networkv1alpha1.DNSName) *DNSNameResolverResolvedNameApplyConfiguration {
+	b.DNSName = &value
+	return b
+}
+
+// WithResolvedAddresses adds the given value to the ResolvedAddresses field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResolvedAddresses field.
+func (b *DNSNameResolverResolvedNameApplyConfiguration) WithResolvedAddresses(values ...*DNSNameResolverResolvedAddressApplyConfiguration) *DNSNameResolverResolvedNameApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithResolvedAddresses")
+		}
+		b.ResolvedAddresses = append(b.ResolvedAddresses, *values[i])
+	}
+	return b
+}
+
+// WithResolutionFailures sets the ResolutionFailures field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResolutionFailures field is set to the value of the last call.
+func (b *DNSNameResolverResolvedNameApplyConfiguration) WithResolutionFailures(value int32) *DNSNameResolverResolvedNameApplyConfiguration {
+	b.ResolutionFailures = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverspec.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverspec.go
new file mode 100644
index 0000000000000..96d13fae199a4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverspec.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	networkv1alpha1 "github.com/openshift/api/network/v1alpha1"
+)
+
+// DNSNameResolverSpecApplyConfiguration represents a declarative configuration of the DNSNameResolverSpec type for use
+// with apply.
+type DNSNameResolverSpecApplyConfiguration struct {
+	Name *networkv1alpha1.DNSName `json:"name,omitempty"`
+}
+
+// DNSNameResolverSpecApplyConfiguration constructs a declarative configuration of the DNSNameResolverSpec type for use with
+// apply.
+func DNSNameResolverSpec() *DNSNameResolverSpecApplyConfiguration {
+	return &DNSNameResolverSpecApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DNSNameResolverSpecApplyConfiguration) WithName(value networkv1alpha1.DNSName) *DNSNameResolverSpecApplyConfiguration {
+	b.Name = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverstatus.go b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverstatus.go
new file mode 100644
index 0000000000000..234b82495b954
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1/dnsnameresolverstatus.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// DNSNameResolverStatusApplyConfiguration represents a declarative configuration of the DNSNameResolverStatus type for use
+// with apply.
+type DNSNameResolverStatusApplyConfiguration struct {
+	ResolvedNames []DNSNameResolverResolvedNameApplyConfiguration `json:"resolvedNames,omitempty"`
+}
+
+// DNSNameResolverStatusApplyConfiguration constructs a declarative configuration of the DNSNameResolverStatus type for use with
+// apply.
+func DNSNameResolverStatus() *DNSNameResolverStatusApplyConfiguration {
+	return &DNSNameResolverStatusApplyConfiguration{}
+}
+
+// WithResolvedNames adds the given value to the ResolvedNames field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResolvedNames field.
+func (b *DNSNameResolverStatusApplyConfiguration) WithResolvedNames(values ...*DNSNameResolverResolvedNameApplyConfiguration) *DNSNameResolverStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithResolvedNames")
+		}
+		b.ResolvedNames = append(b.ResolvedNames, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/clientset.go
new file mode 100644
index 0000000000000..4bc1c654491a1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/clientset.go
@@ -0,0 +1,117 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+	fmt "fmt"
+	http "net/http"
+
+	networkv1 "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1"
+	networkv1alpha1 "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1"
+	discovery "k8s.io/client-go/discovery"
+	rest "k8s.io/client-go/rest"
+	flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+	Discovery() discovery.DiscoveryInterface
+	NetworkV1() networkv1.NetworkV1Interface
+	NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface
+}
+
+// Clientset contains the clients for groups.
+type Clientset struct { + *discovery.DiscoveryClient + networkV1 *networkv1.NetworkV1Client + networkV1alpha1 *networkv1alpha1.NetworkV1alpha1Client +} + +// NetworkV1 retrieves the NetworkV1Client +func (c *Clientset) NetworkV1() networkv1.NetworkV1Interface { + return c.networkV1 +} + +// NetworkV1alpha1 retrieves the NetworkV1alpha1Client +func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface { + return c.networkV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.networkV1, err = networkv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.networkV1alpha1, err = networkv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
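NewForConfigAndClient above rejects a config with QPS set but Burst <= 0, so both should be set together when opting into client-side rate limiting. A minimal sketch, with illustrative numbers:

package main

import (
	versioned "github.com/openshift/client-go/network/clientset/versioned"
	"k8s.io/client-go/rest"
)

// newRateLimitedClientset sets QPS and Burst together so that the clientset
// constructor can build its token-bucket rate limiter; values illustrative.
func newRateLimitedClientset(cfg *rest.Config) (*versioned.Clientset, error) {
	cfg.QPS = 20   // steady-state requests per second
	cfg.Burst = 40 // short-term burst allowance
	return versioned.NewForConfig(cfg)
}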
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.networkV1 = networkv1.New(c) + cs.networkV1alpha1 = networkv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..9d90dd5ebc2e1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/scheme/register.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + networkv1 "github.com/openshift/api/network/v1" + networkv1alpha1 "github.com/openshift/api/network/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + networkv1.AddToScheme, + networkv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/clusternetwork.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/clusternetwork.go new file mode 100644 index 0000000000000..5c20bc3879942 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/clusternetwork.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + networkv1 "github.com/openshift/api/network/v1" + applyconfigurationsnetworkv1 "github.com/openshift/client-go/network/applyconfigurations/network/v1" + scheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterNetworksGetter has a method to return a ClusterNetworkInterface. +// A group's client should implement this interface. 
+type ClusterNetworksGetter interface { + ClusterNetworks() ClusterNetworkInterface +} + +// ClusterNetworkInterface has methods to work with ClusterNetwork resources. +type ClusterNetworkInterface interface { + Create(ctx context.Context, clusterNetwork *networkv1.ClusterNetwork, opts metav1.CreateOptions) (*networkv1.ClusterNetwork, error) + Update(ctx context.Context, clusterNetwork *networkv1.ClusterNetwork, opts metav1.UpdateOptions) (*networkv1.ClusterNetwork, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkv1.ClusterNetwork, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkv1.ClusterNetworkList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkv1.ClusterNetwork, err error) + Apply(ctx context.Context, clusterNetwork *applyconfigurationsnetworkv1.ClusterNetworkApplyConfiguration, opts metav1.ApplyOptions) (result *networkv1.ClusterNetwork, err error) + ClusterNetworkExpansion +} + +// clusterNetworks implements ClusterNetworkInterface +type clusterNetworks struct { + *gentype.ClientWithListAndApply[*networkv1.ClusterNetwork, *networkv1.ClusterNetworkList, *applyconfigurationsnetworkv1.ClusterNetworkApplyConfiguration] +} + +// newClusterNetworks returns a ClusterNetworks +func newClusterNetworks(c *NetworkV1Client) *clusterNetworks { + return &clusterNetworks{ + gentype.NewClientWithListAndApply[*networkv1.ClusterNetwork, *networkv1.ClusterNetworkList, *applyconfigurationsnetworkv1.ClusterNetworkApplyConfiguration]( + "clusternetworks", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkv1.ClusterNetwork { return &networkv1.ClusterNetwork{} }, + func() *networkv1.ClusterNetworkList { return &networkv1.ClusterNetworkList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/doc.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/egressnetworkpolicy.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/egressnetworkpolicy.go new file mode 100644 index 0000000000000..8c1c34db033ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/egressnetworkpolicy.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. 
+
+package v1
+
+import (
+	context "context"
+
+	networkv1 "github.com/openshift/api/network/v1"
+	applyconfigurationsnetworkv1 "github.com/openshift/client-go/network/applyconfigurations/network/v1"
+	scheme "github.com/openshift/client-go/network/clientset/versioned/scheme"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	gentype "k8s.io/client-go/gentype"
+)
+
+// EgressNetworkPoliciesGetter has a method to return an EgressNetworkPolicyInterface.
+// A group's client should implement this interface.
+type EgressNetworkPoliciesGetter interface {
+	EgressNetworkPolicies(namespace string) EgressNetworkPolicyInterface
+}
+
+// EgressNetworkPolicyInterface has methods to work with EgressNetworkPolicy resources.
+type EgressNetworkPolicyInterface interface {
+	Create(ctx context.Context, egressNetworkPolicy *networkv1.EgressNetworkPolicy, opts metav1.CreateOptions) (*networkv1.EgressNetworkPolicy, error)
+	Update(ctx context.Context, egressNetworkPolicy *networkv1.EgressNetworkPolicy, opts metav1.UpdateOptions) (*networkv1.EgressNetworkPolicy, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkv1.EgressNetworkPolicy, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*networkv1.EgressNetworkPolicyList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkv1.EgressNetworkPolicy, err error)
+	Apply(ctx context.Context, egressNetworkPolicy *applyconfigurationsnetworkv1.EgressNetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *networkv1.EgressNetworkPolicy, err error)
+	EgressNetworkPolicyExpansion
+}
+
+// egressNetworkPolicies implements EgressNetworkPolicyInterface
+type egressNetworkPolicies struct {
+	*gentype.ClientWithListAndApply[*networkv1.EgressNetworkPolicy, *networkv1.EgressNetworkPolicyList, *applyconfigurationsnetworkv1.EgressNetworkPolicyApplyConfiguration]
+}
+
+// newEgressNetworkPolicies returns an EgressNetworkPolicies
+func newEgressNetworkPolicies(c *NetworkV1Client, namespace string) *egressNetworkPolicies {
+	return &egressNetworkPolicies{
+		gentype.NewClientWithListAndApply[*networkv1.EgressNetworkPolicy, *networkv1.EgressNetworkPolicyList, *applyconfigurationsnetworkv1.EgressNetworkPolicyApplyConfiguration](
+			"egressnetworkpolicies",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			namespace,
+			func() *networkv1.EgressNetworkPolicy { return &networkv1.EgressNetworkPolicy{} },
+			func() *networkv1.EgressNetworkPolicyList { return &networkv1.EgressNetworkPolicyList{} },
+		),
+	}
+}
diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/generated_expansion.go
new file mode 100644
index 0000000000000..14e656e32da7f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/generated_expansion.go
@@ -0,0 +1,11 @@
+// Code generated by client-gen. DO NOT EDIT.
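Note the scoping difference between the generated getters: ClusterNetworks() takes no namespace, while EgressNetworkPolicies(namespace) does. A short sketch contrasting the two; the namespace is illustrative:

package main

import (
	"context"

	networkv1client "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listBoth contrasts a cluster-scoped and a namespaced typed client
// obtained from the same group client; the namespace is illustrative.
func listBoth(ctx context.Context, c networkv1client.NetworkV1Interface) error {
	if _, err := c.ClusterNetworks().List(ctx, metav1.ListOptions{}); err != nil { // cluster-scoped
		return err
	}
	_, err := c.EgressNetworkPolicies("openshift-sdn").List(ctx, metav1.ListOptions{}) // namespaced
	return err
}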
+ +package v1 + +type ClusterNetworkExpansion interface{} + +type EgressNetworkPolicyExpansion interface{} + +type HostSubnetExpansion interface{} + +type NetNamespaceExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/hostsubnet.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/hostsubnet.go new file mode 100644 index 0000000000000..10ec19fd310ca --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/hostsubnet.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + networkv1 "github.com/openshift/api/network/v1" + applyconfigurationsnetworkv1 "github.com/openshift/client-go/network/applyconfigurations/network/v1" + scheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// HostSubnetsGetter has a method to return a HostSubnetInterface. +// A group's client should implement this interface. +type HostSubnetsGetter interface { + HostSubnets() HostSubnetInterface +} + +// HostSubnetInterface has methods to work with HostSubnet resources. +type HostSubnetInterface interface { + Create(ctx context.Context, hostSubnet *networkv1.HostSubnet, opts metav1.CreateOptions) (*networkv1.HostSubnet, error) + Update(ctx context.Context, hostSubnet *networkv1.HostSubnet, opts metav1.UpdateOptions) (*networkv1.HostSubnet, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkv1.HostSubnet, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkv1.HostSubnetList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkv1.HostSubnet, err error) + Apply(ctx context.Context, hostSubnet *applyconfigurationsnetworkv1.HostSubnetApplyConfiguration, opts metav1.ApplyOptions) (result *networkv1.HostSubnet, err error) + HostSubnetExpansion +} + +// hostSubnets implements HostSubnetInterface +type hostSubnets struct { + *gentype.ClientWithListAndApply[*networkv1.HostSubnet, *networkv1.HostSubnetList, *applyconfigurationsnetworkv1.HostSubnetApplyConfiguration] +} + +// newHostSubnets returns a HostSubnets +func newHostSubnets(c *NetworkV1Client) *hostSubnets { + return &hostSubnets{ + gentype.NewClientWithListAndApply[*networkv1.HostSubnet, *networkv1.HostSubnetList, *applyconfigurationsnetworkv1.HostSubnetApplyConfiguration]( + "hostsubnets", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkv1.HostSubnet { return &networkv1.HostSubnet{} }, + func() *networkv1.HostSubnetList { return &networkv1.HostSubnetList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/netnamespace.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/netnamespace.go new file mode 100644 index 0000000000000..b1cae0b73df4f --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/netnamespace.go @@ -0,0 +1,54 @@ 
+// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + networkv1 "github.com/openshift/api/network/v1" + applyconfigurationsnetworkv1 "github.com/openshift/client-go/network/applyconfigurations/network/v1" + scheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// NetNamespacesGetter has a method to return a NetNamespaceInterface. +// A group's client should implement this interface. +type NetNamespacesGetter interface { + NetNamespaces() NetNamespaceInterface +} + +// NetNamespaceInterface has methods to work with NetNamespace resources. +type NetNamespaceInterface interface { + Create(ctx context.Context, netNamespace *networkv1.NetNamespace, opts metav1.CreateOptions) (*networkv1.NetNamespace, error) + Update(ctx context.Context, netNamespace *networkv1.NetNamespace, opts metav1.UpdateOptions) (*networkv1.NetNamespace, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkv1.NetNamespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkv1.NetNamespaceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkv1.NetNamespace, err error) + Apply(ctx context.Context, netNamespace *applyconfigurationsnetworkv1.NetNamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *networkv1.NetNamespace, err error) + NetNamespaceExpansion +} + +// netNamespaces implements NetNamespaceInterface +type netNamespaces struct { + *gentype.ClientWithListAndApply[*networkv1.NetNamespace, *networkv1.NetNamespaceList, *applyconfigurationsnetworkv1.NetNamespaceApplyConfiguration] +} + +// newNetNamespaces returns a NetNamespaces +func newNetNamespaces(c *NetworkV1Client) *netNamespaces { + return &netNamespaces{ + gentype.NewClientWithListAndApply[*networkv1.NetNamespace, *networkv1.NetNamespaceList, *applyconfigurationsnetworkv1.NetNamespaceApplyConfiguration]( + "netnamespaces", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkv1.NetNamespace { return &networkv1.NetNamespace{} }, + func() *networkv1.NetNamespaceList { return &networkv1.NetNamespaceList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/network_client.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/network_client.go new file mode 100644 index 0000000000000..b6042cdcd242c --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1/network_client.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + http "net/http" + + networkv1 "github.com/openshift/api/network/v1" + scheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkV1Interface interface { + RESTClient() rest.Interface + ClusterNetworksGetter + EgressNetworkPoliciesGetter + HostSubnetsGetter + NetNamespacesGetter +} + +// NetworkV1Client is used to interact with features provided by the network.openshift.io group. +type NetworkV1Client struct { + restClient rest.Interface +} + +func (c *NetworkV1Client) ClusterNetworks() ClusterNetworkInterface { + return newClusterNetworks(c) +} + +func (c *NetworkV1Client) EgressNetworkPolicies(namespace string) EgressNetworkPolicyInterface { + return newEgressNetworkPolicies(c, namespace) +} + +func (c *NetworkV1Client) HostSubnets() HostSubnetInterface { + return newHostSubnets(c) +} + +func (c *NetworkV1Client) NetNamespaces() NetNamespaceInterface { + return newNetNamespaces(c) +} + +// NewForConfig creates a new NetworkV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NetworkV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NetworkV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkV1Client for the given RESTClient. +func New(c rest.Interface) *NetworkV1Client { + return &NetworkV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := networkv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/dnsnameresolver.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/dnsnameresolver.go new file mode 100644 index 0000000000000..871641eaf28f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/dnsnameresolver.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. 
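A group client can also be built on its own, in which case NewForConfig runs setConfigDefaults to pin the group/version, the /apis path, and the negotiated serializer. A sketch assuming an in-cluster config; any other *rest.Config source works the same way:

package main

import (
	networkv1client "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1"
	"k8s.io/client-go/rest"
)

// newNetworkV1 builds only the NetworkV1 group client rather than the
// aggregate Clientset; rest.InClusterConfig is one common config source.
func newNetworkV1() (*networkv1client.NetworkV1Client, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return networkv1client.NewForConfig(cfg)
}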
+ +package v1alpha1 + +import ( + context "context" + + networkv1alpha1 "github.com/openshift/api/network/v1alpha1" + applyconfigurationsnetworkv1alpha1 "github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1" + scheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// DNSNameResolversGetter has a method to return a DNSNameResolverInterface. +// A group's client should implement this interface. +type DNSNameResolversGetter interface { + DNSNameResolvers(namespace string) DNSNameResolverInterface +} + +// DNSNameResolverInterface has methods to work with DNSNameResolver resources. +type DNSNameResolverInterface interface { + Create(ctx context.Context, dNSNameResolver *networkv1alpha1.DNSNameResolver, opts v1.CreateOptions) (*networkv1alpha1.DNSNameResolver, error) + Update(ctx context.Context, dNSNameResolver *networkv1alpha1.DNSNameResolver, opts v1.UpdateOptions) (*networkv1alpha1.DNSNameResolver, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, dNSNameResolver *networkv1alpha1.DNSNameResolver, opts v1.UpdateOptions) (*networkv1alpha1.DNSNameResolver, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkv1alpha1.DNSNameResolver, error) + List(ctx context.Context, opts v1.ListOptions) (*networkv1alpha1.DNSNameResolverList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkv1alpha1.DNSNameResolver, err error) + Apply(ctx context.Context, dNSNameResolver *applyconfigurationsnetworkv1alpha1.DNSNameResolverApplyConfiguration, opts v1.ApplyOptions) (result *networkv1alpha1.DNSNameResolver, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, dNSNameResolver *applyconfigurationsnetworkv1alpha1.DNSNameResolverApplyConfiguration, opts v1.ApplyOptions) (result *networkv1alpha1.DNSNameResolver, err error) + DNSNameResolverExpansion +} + +// dNSNameResolvers implements DNSNameResolverInterface +type dNSNameResolvers struct { + *gentype.ClientWithListAndApply[*networkv1alpha1.DNSNameResolver, *networkv1alpha1.DNSNameResolverList, *applyconfigurationsnetworkv1alpha1.DNSNameResolverApplyConfiguration] +} + +// newDNSNameResolvers returns a DNSNameResolvers +func newDNSNameResolvers(c *NetworkV1alpha1Client, namespace string) *dNSNameResolvers { + return &dNSNameResolvers{ + gentype.NewClientWithListAndApply[*networkv1alpha1.DNSNameResolver, *networkv1alpha1.DNSNameResolverList, *applyconfigurationsnetworkv1alpha1.DNSNameResolverApplyConfiguration]( + "dnsnameresolvers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *networkv1alpha1.DNSNameResolver { return &networkv1alpha1.DNSNameResolver{} }, + func() *networkv1alpha1.DNSNameResolverList { return &networkv1alpha1.DNSNameResolverList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/doc.go new file mode 100644 index 0000000000000..93a7ca4e0e2b9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000000..53f71dd2fdc4b --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type DNSNameResolverExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/network_client.go b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/network_client.go new file mode 100644 index 0000000000000..02ec4142e61b0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1/network_client.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + http "net/http" + + networkv1alpha1 "github.com/openshift/api/network/v1alpha1" + scheme "github.com/openshift/client-go/network/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkV1alpha1Interface interface { + RESTClient() rest.Interface + DNSNameResolversGetter +} + +// NetworkV1alpha1Client is used to interact with features provided by the network.openshift.io group. +type NetworkV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NetworkV1alpha1Client) DNSNameResolvers(namespace string) DNSNameResolverInterface { + return newDNSNameResolvers(c, namespace) +} + +// NewForConfig creates a new NetworkV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
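Apply and ApplyStatus above accept the apply configurations generated earlier in this patch, enabling server-side apply. A minimal sketch; the field manager name, namespace, and DNS name are illustrative:

package main

import (
	"context"

	networkv1alpha1apply "github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1"
	networkv1alpha1client "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyResolver server-side-applies a DNSNameResolver; FieldManager
// identifies this writer to the apiserver's conflict detection.
func applyResolver(ctx context.Context, c networkv1alpha1client.NetworkV1alpha1Interface) error {
	cfg := networkv1alpha1apply.DNSNameResolver("example-resolver", "openshift-dns").
		WithSpec(networkv1alpha1apply.DNSNameResolverSpec().WithName("www.example.com."))
	_, err := c.DNSNameResolvers("openshift-dns").
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}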
+func NewForConfig(c *rest.Config) (*NetworkV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NetworkV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NetworkV1alpha1Client { + return &NetworkV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := networkv1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/factory.go new file mode 100644 index 0000000000000..88987115f40f7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/network/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + network "github.com/openshift/client-go/network/informers/externalversions/network" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. 
+ startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. 
+			informer := informer
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
+			f.startedInformers[informerType] = true
+		}
+	}
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+	f.lock.Lock()
+	f.shuttingDown = true
+	f.lock.Unlock()
+
+	// Will return immediately if there is nothing to wait for.
+	f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//		if !ok {
+//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//			return
+//		}
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	// Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Network() network.Interface +} + +func (f *sharedInformerFactory) Network() network.Interface { + return network.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/generic.go new file mode 100644 index 0000000000000..885ca8b06fae9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/generic.go @@ -0,0 +1,57 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + fmt "fmt" + + v1 "github.com/openshift/api/network/v1" + v1alpha1 "github.com/openshift/api/network/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=network.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("clusternetworks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1().ClusterNetworks().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("egressnetworkpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1().EgressNetworkPolicies().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("hostsubnets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1().HostSubnets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("netnamespaces"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1().NetNamespaces().Informer()}, nil + + // Group=network.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("dnsnameresolvers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1alpha1().DNSNameResolvers().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 
0000000000000..27f8e38600660 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/network/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/interface.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/interface.go new file mode 100644 index 0000000000000..db364bdb3bce4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/interface.go @@ -0,0 +1,38 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package network + +import ( + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/network/informers/externalversions/network/v1" + v1alpha1 "github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/clusternetwork.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/clusternetwork.go new file mode 100644 index 0000000000000..0c1e0725e457a --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/clusternetwork.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. 
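With the factory, the per-group Interface, and the internal interfaces in place, the usual lifecycle is: request informers, Start, wait for cache sync, then read through listers. A sketch, assuming the generated v1 group interface exposes ClusterNetworks() as the informer file below suggests; the resync period is illustrative:

package main

import (
	"fmt"
	"os"
	"time"

	versioned "github.com/openshift/client-go/network/clientset/versioned"
	externalversions "github.com/openshift/client-go/network/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// runInformers shows the start/sync/read sequence; the 10-minute resync
// period and the stop-channel wiring are illustrative.
func runInformers(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)
	defer factory.Shutdown()

	// Requesting a lister registers its informer with the factory.
	lister := factory.Network().V1().ClusterNetworks().Lister()

	factory.Start(stopCh) // non-blocking; one goroutine per informer
	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
			return
		}
	}

	if nets, err := lister.List(labels.Everything()); err == nil {
		fmt.Printf("observed %d ClusterNetworks\n", len(nets))
	}
}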
+ +package v1 + +import ( + context "context" + time "time" + + apinetworkv1 "github.com/openshift/api/network/v1" + versioned "github.com/openshift/client-go/network/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + networkv1 "github.com/openshift/client-go/network/listers/network/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterNetworkInformer provides access to a shared informer and lister for +// ClusterNetworks. +type ClusterNetworkInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkv1.ClusterNetworkLister +} + +type clusterNetworkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterNetworkInformer constructs a new informer for ClusterNetwork type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterNetworkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterNetworkInformer constructs a new informer for ClusterNetwork type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().ClusterNetworks().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().ClusterNetworks().Watch(context.TODO(), options) + }, + }, + &apinetworkv1.ClusterNetwork{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterNetworkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterNetworkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterNetworkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkv1.ClusterNetwork{}, f.defaultInformer) +} + +func (f *clusterNetworkInformer) Lister() networkv1.ClusterNetworkLister { + return networkv1.NewClusterNetworkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/egressnetworkpolicy.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/egressnetworkpolicy.go new file mode 100644 index 0000000000000..6d7c3139ee7ce --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/egressnetworkpolicy.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
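The NewFiltered* constructors and the factory-level WithTweakListOptions option both take the same TweakListOptionsFunc, so list/watch filtering can be applied per informer or factory-wide. A sketch of the factory-wide form; the label selector value is illustrative:

package main

import (
	"time"

	versioned "github.com/openshift/client-go/network/clientset/versioned"
	externalversions "github.com/openshift/client-go/network/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newFilteredFactory scopes every List and Watch issued by the factory's
// informers to one label selector; the selector value is illustrative.
func newFilteredFactory(client versioned.Interface) externalversions.SharedInformerFactory {
	return externalversions.NewSharedInformerFactoryWithOptions(
		client,
		30*time.Minute,
		externalversions.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "network.openshift.io/managed=true"
		}),
	)
}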
+ +package v1 + +import ( + context "context" + time "time" + + apinetworkv1 "github.com/openshift/api/network/v1" + versioned "github.com/openshift/client-go/network/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + networkv1 "github.com/openshift/client-go/network/listers/network/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// EgressNetworkPolicyInformer provides access to a shared informer and lister for +// EgressNetworkPolicies. +type EgressNetworkPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkv1.EgressNetworkPolicyLister +} + +type egressNetworkPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEgressNetworkPolicyInformer constructs a new informer for EgressNetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEgressNetworkPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEgressNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEgressNetworkPolicyInformer constructs a new informer for EgressNetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEgressNetworkPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().EgressNetworkPolicies(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().EgressNetworkPolicies(namespace).Watch(context.TODO(), options) + }, + }, + &apinetworkv1.EgressNetworkPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *egressNetworkPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEgressNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *egressNetworkPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkv1.EgressNetworkPolicy{}, f.defaultInformer) +} + +func (f *egressNetworkPolicyInformer) Lister() networkv1.EgressNetworkPolicyLister { + return networkv1.NewEgressNetworkPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/hostsubnet.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/hostsubnet.go new file mode 100644 index 0000000000000..74147da3fbcac --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/hostsubnet.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apinetworkv1 "github.com/openshift/api/network/v1" + versioned "github.com/openshift/client-go/network/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + networkv1 "github.com/openshift/client-go/network/listers/network/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// HostSubnetInformer provides access to a shared informer and lister for +// HostSubnets. +type HostSubnetInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkv1.HostSubnetLister +} + +type hostSubnetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewHostSubnetInformer constructs a new informer for HostSubnet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewHostSubnetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHostSubnetInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredHostSubnetInformer constructs a new informer for HostSubnet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredHostSubnetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().HostSubnets().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().HostSubnets().Watch(context.TODO(), options) + }, + }, + &apinetworkv1.HostSubnet{}, + resyncPeriod, + indexers, + ) +} + +func (f *hostSubnetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHostSubnetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *hostSubnetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkv1.HostSubnet{}, f.defaultInformer) +} + +func (f *hostSubnetInformer) Lister() networkv1.HostSubnetLister { + return networkv1.NewHostSubnetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/interface.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/interface.go new file mode 100644 index 0000000000000..1f696be4fa30e --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/interface.go @@ -0,0 +1,50 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterNetworks returns a ClusterNetworkInformer. + ClusterNetworks() ClusterNetworkInformer + // EgressNetworkPolicies returns an EgressNetworkPolicyInformer. + EgressNetworkPolicies() EgressNetworkPolicyInformer + // HostSubnets returns a HostSubnetInformer. + HostSubnets() HostSubnetInformer + // NetNamespaces returns a NetNamespaceInformer. + NetNamespaces() NetNamespaceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterNetworks returns a ClusterNetworkInformer. +func (v *version) ClusterNetworks() ClusterNetworkInformer { + return &clusterNetworkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// EgressNetworkPolicies returns an EgressNetworkPolicyInformer. +func (v *version) EgressNetworkPolicies() EgressNetworkPolicyInformer { + return &egressNetworkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// HostSubnets returns a HostSubnetInformer.
+func (v *version) HostSubnets() HostSubnetInformer { + return &hostSubnetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// NetNamespaces returns a NetNamespaceInformer. +func (v *version) NetNamespaces() NetNamespaceInformer { + return &netNamespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/netnamespace.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/netnamespace.go new file mode 100644 index 0000000000000..5f8567d6b30c0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1/netnamespace.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apinetworkv1 "github.com/openshift/api/network/v1" + versioned "github.com/openshift/client-go/network/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + networkv1 "github.com/openshift/client-go/network/listers/network/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NetNamespaceInformer provides access to a shared informer and lister for +// NetNamespaces. +type NetNamespaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkv1.NetNamespaceLister +} + +type netNamespaceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNetNamespaceInformer constructs a new informer for NetNamespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNetNamespaceInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetNamespaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNetNamespaceInformer constructs a new informer for NetNamespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNetNamespaceInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().NetNamespaces().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1().NetNamespaces().Watch(context.TODO(), options) + }, + }, + &apinetworkv1.NetNamespace{}, + resyncPeriod, + indexers, + ) +} + +func (f *netNamespaceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *netNamespaceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkv1.NetNamespace{}, f.defaultInformer) +} + +func (f *netNamespaceInformer) Lister() networkv1.NetNamespaceLister { + return networkv1.NewNetNamespaceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1/dnsnameresolver.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1/dnsnameresolver.go new file mode 100644 index 0000000000000..5123527b2af5b --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1/dnsnameresolver.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apinetworkv1alpha1 "github.com/openshift/api/network/v1alpha1" + versioned "github.com/openshift/client-go/network/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" + networkv1alpha1 "github.com/openshift/client-go/network/listers/network/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DNSNameResolverInformer provides access to a shared informer and lister for +// DNSNameResolvers. +type DNSNameResolverInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkv1alpha1.DNSNameResolverLister +} + +type dNSNameResolverInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDNSNameResolverInformer constructs a new informer for DNSNameResolver type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDNSNameResolverInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDNSNameResolverInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDNSNameResolverInformer constructs a new informer for DNSNameResolver type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredDNSNameResolverInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1alpha1().DNSNameResolvers(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkV1alpha1().DNSNameResolvers(namespace).Watch(context.TODO(), options) + }, + }, + &apinetworkv1alpha1.DNSNameResolver{}, + resyncPeriod, + indexers, + ) +} + +func (f *dNSNameResolverInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDNSNameResolverInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *dNSNameResolverInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkv1alpha1.DNSNameResolver{}, f.defaultInformer) +} + +func (f *dNSNameResolverInformer) Lister() networkv1alpha1.DNSNameResolverLister { + return networkv1alpha1.NewDNSNameResolverLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1/interface.go new file mode 100644 index 0000000000000..f85902d0b07bb --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/openshift/client-go/network/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DNSNameResolvers returns a DNSNameResolverInformer. + DNSNameResolvers() DNSNameResolverInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DNSNameResolvers returns a DNSNameResolverInformer. +func (v *version) DNSNameResolvers() DNSNameResolverInformer { + return &dNSNameResolverInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1/clusternetwork.go b/vendor/github.com/openshift/client-go/network/listers/network/v1/clusternetwork.go new file mode 100644 index 0000000000000..cfa283faddaec --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1/clusternetwork.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. 
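[Editor's note] DNSNameResolver is namespaced, so its informer carries a namespace through the factory, as the v1alpha1 interface just shown passes along. A sketch assuming the standard factory options emitted by informer-gen (NewSharedInformerFactoryWithOptions and WithNamespace) are present in this vendor drop, which the diff above does not show:

package informerexample

import (
	"time"

	versioned "github.com/openshift/client-go/network/clientset/versioned"
	externalversions "github.com/openshift/client-go/network/informers/externalversions"
	"k8s.io/client-go/tools/cache"
)

// dnsNameResolverInformerFor scopes the whole factory to one namespace; the
// namespaced DNSNameResolver informer then inherits that scope.
func dnsNameResolverInformerFor(client versioned.Interface, ns string) cache.SharedIndexInformer {
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute, externalversions.WithNamespace(ns))
	return factory.Network().V1alpha1().DNSNameResolvers().Informer()
}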
+ +package v1 + +import ( + networkv1 "github.com/openshift/api/network/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterNetworkLister helps list ClusterNetworks. +// All objects returned here must be treated as read-only. +type ClusterNetworkLister interface { + // List lists all ClusterNetworks in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkv1.ClusterNetwork, err error) + // Get retrieves the ClusterNetwork from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkv1.ClusterNetwork, error) + ClusterNetworkListerExpansion +} + +// clusterNetworkLister implements the ClusterNetworkLister interface. +type clusterNetworkLister struct { + listers.ResourceIndexer[*networkv1.ClusterNetwork] +} + +// NewClusterNetworkLister returns a new ClusterNetworkLister. +func NewClusterNetworkLister(indexer cache.Indexer) ClusterNetworkLister { + return &clusterNetworkLister{listers.New[*networkv1.ClusterNetwork](indexer, networkv1.Resource("clusternetwork"))} +} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1/egressnetworkpolicy.go b/vendor/github.com/openshift/client-go/network/listers/network/v1/egressnetworkpolicy.go new file mode 100644 index 0000000000000..c7eb08a60d11f --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1/egressnetworkpolicy.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + networkv1 "github.com/openshift/api/network/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// EgressNetworkPolicyLister helps list EgressNetworkPolicies. +// All objects returned here must be treated as read-only. +type EgressNetworkPolicyLister interface { + // List lists all EgressNetworkPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkv1.EgressNetworkPolicy, err error) + // EgressNetworkPolicies returns an object that can list and get EgressNetworkPolicies. + EgressNetworkPolicies(namespace string) EgressNetworkPolicyNamespaceLister + EgressNetworkPolicyListerExpansion +} + +// egressNetworkPolicyLister implements the EgressNetworkPolicyLister interface. +type egressNetworkPolicyLister struct { + listers.ResourceIndexer[*networkv1.EgressNetworkPolicy] +} + +// NewEgressNetworkPolicyLister returns a new EgressNetworkPolicyLister. +func NewEgressNetworkPolicyLister(indexer cache.Indexer) EgressNetworkPolicyLister { + return &egressNetworkPolicyLister{listers.New[*networkv1.EgressNetworkPolicy](indexer, networkv1.Resource("egressnetworkpolicy"))} +} + +// EgressNetworkPolicies returns an object that can list and get EgressNetworkPolicies. +func (s *egressNetworkPolicyLister) EgressNetworkPolicies(namespace string) EgressNetworkPolicyNamespaceLister { + return egressNetworkPolicyNamespaceLister{listers.NewNamespaced[*networkv1.EgressNetworkPolicy](s.ResourceIndexer, namespace)} +} + +// EgressNetworkPolicyNamespaceLister helps list and get EgressNetworkPolicies. +// All objects returned here must be treated as read-only. +type EgressNetworkPolicyNamespaceLister interface { + // List lists all EgressNetworkPolicies in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*networkv1.EgressNetworkPolicy, err error) + // Get retrieves the EgressNetworkPolicy from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkv1.EgressNetworkPolicy, error) + EgressNetworkPolicyNamespaceListerExpansion +} + +// egressNetworkPolicyNamespaceLister implements the EgressNetworkPolicyNamespaceLister +// interface. +type egressNetworkPolicyNamespaceLister struct { + listers.ResourceIndexer[*networkv1.EgressNetworkPolicy] +} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/network/listers/network/v1/expansion_generated.go new file mode 100644 index 0000000000000..41f06c59aea68 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1/expansion_generated.go @@ -0,0 +1,23 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ClusterNetworkListerExpansion allows custom methods to be added to +// ClusterNetworkLister. +type ClusterNetworkListerExpansion interface{} + +// EgressNetworkPolicyListerExpansion allows custom methods to be added to +// EgressNetworkPolicyLister. +type EgressNetworkPolicyListerExpansion interface{} + +// EgressNetworkPolicyNamespaceListerExpansion allows custom methods to be added to +// EgressNetworkPolicyNamespaceLister. +type EgressNetworkPolicyNamespaceListerExpansion interface{} + +// HostSubnetListerExpansion allows custom methods to be added to +// HostSubnetLister. +type HostSubnetListerExpansion interface{} + +// NetNamespaceListerExpansion allows custom methods to be added to +// NetNamespaceLister. +type NetNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1/hostsubnet.go b/vendor/github.com/openshift/client-go/network/listers/network/v1/hostsubnet.go new file mode 100644 index 0000000000000..991f17e11b1cf --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1/hostsubnet.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + networkv1 "github.com/openshift/api/network/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// HostSubnetLister helps list HostSubnets. +// All objects returned here must be treated as read-only. +type HostSubnetLister interface { + // List lists all HostSubnets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkv1.HostSubnet, err error) + // Get retrieves the HostSubnet from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkv1.HostSubnet, error) + HostSubnetListerExpansion +} + +// hostSubnetLister implements the HostSubnetLister interface. +type hostSubnetLister struct { + listers.ResourceIndexer[*networkv1.HostSubnet] +} + +// NewHostSubnetLister returns a new HostSubnetLister. 
+func NewHostSubnetLister(indexer cache.Indexer) HostSubnetLister { + return &hostSubnetLister{listers.New[*networkv1.HostSubnet](indexer, networkv1.Resource("hostsubnet"))} +} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1/netnamespace.go b/vendor/github.com/openshift/client-go/network/listers/network/v1/netnamespace.go new file mode 100644 index 0000000000000..0836f70450d2b --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1/netnamespace.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + networkv1 "github.com/openshift/api/network/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// NetNamespaceLister helps list NetNamespaces. +// All objects returned here must be treated as read-only. +type NetNamespaceLister interface { + // List lists all NetNamespaces in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkv1.NetNamespace, err error) + // Get retrieves the NetNamespace from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkv1.NetNamespace, error) + NetNamespaceListerExpansion +} + +// netNamespaceLister implements the NetNamespaceLister interface. +type netNamespaceLister struct { + listers.ResourceIndexer[*networkv1.NetNamespace] +} + +// NewNetNamespaceLister returns a new NetNamespaceLister. +func NewNetNamespaceLister(indexer cache.Indexer) NetNamespaceLister { + return &netNamespaceLister{listers.New[*networkv1.NetNamespace](indexer, networkv1.Resource("netnamespace"))} +} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1alpha1/dnsnameresolver.go b/vendor/github.com/openshift/client-go/network/listers/network/v1alpha1/dnsnameresolver.go new file mode 100644 index 0000000000000..58bb04173540e --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1alpha1/dnsnameresolver.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + networkv1alpha1 "github.com/openshift/api/network/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// DNSNameResolverLister helps list DNSNameResolvers. +// All objects returned here must be treated as read-only. +type DNSNameResolverLister interface { + // List lists all DNSNameResolvers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkv1alpha1.DNSNameResolver, err error) + // DNSNameResolvers returns an object that can list and get DNSNameResolvers. + DNSNameResolvers(namespace string) DNSNameResolverNamespaceLister + DNSNameResolverListerExpansion +} + +// dNSNameResolverLister implements the DNSNameResolverLister interface. +type dNSNameResolverLister struct { + listers.ResourceIndexer[*networkv1alpha1.DNSNameResolver] +} + +// NewDNSNameResolverLister returns a new DNSNameResolverLister. +func NewDNSNameResolverLister(indexer cache.Indexer) DNSNameResolverLister { + return &dNSNameResolverLister{listers.New[*networkv1alpha1.DNSNameResolver](indexer, networkv1alpha1.Resource("dnsnameresolver"))} +} + +// DNSNameResolvers returns an object that can list and get DNSNameResolvers. 
+func (s *dNSNameResolverLister) DNSNameResolvers(namespace string) DNSNameResolverNamespaceLister { + return dNSNameResolverNamespaceLister{listers.NewNamespaced[*networkv1alpha1.DNSNameResolver](s.ResourceIndexer, namespace)} +} + +// DNSNameResolverNamespaceLister helps list and get DNSNameResolvers. +// All objects returned here must be treated as read-only. +type DNSNameResolverNamespaceLister interface { + // List lists all DNSNameResolvers in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkv1alpha1.DNSNameResolver, err error) + // Get retrieves the DNSNameResolver from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkv1alpha1.DNSNameResolver, error) + DNSNameResolverNamespaceListerExpansion +} + +// dNSNameResolverNamespaceLister implements the DNSNameResolverNamespaceLister +// interface. +type dNSNameResolverNamespaceLister struct { + listers.ResourceIndexer[*networkv1alpha1.DNSNameResolver] +} diff --git a/vendor/github.com/openshift/client-go/network/listers/network/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/network/listers/network/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000000..1daec45b54de2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/network/listers/network/v1alpha1/expansion_generated.go @@ -0,0 +1,11 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// DNSNameResolverListerExpansion allows custom methods to be added to +// DNSNameResolverLister. +type DNSNameResolverListerExpansion interface{} + +// DNSNameResolverNamespaceListerExpansion allows custom methods to be added to +// DNSNameResolverNamespaceLister. +type DNSNameResolverNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..ab048a8595979 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/internal/internal.go @@ -0,0 +1,407 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
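[Editor's note] The listers above are thin typed wrappers over an informer's indexer. A read-path sketch (helper name and printed fields are illustrative; objects from the shared cache are read-only, exactly as the lister doc comments require):

package listerexample

import (
	"fmt"

	networkv1listers "github.com/openshift/client-go/network/listers/network/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// dumpHostSubnets lists every cached HostSubnet through the generated lister.
func dumpHostSubnets(indexer cache.Indexer) error {
	lister := networkv1listers.NewHostSubnetLister(indexer)
	subnets, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	for _, s := range subnets {
		fmt.Println(s.Name, s.Subnet) // read-only: never mutate cached objects
	}
	return nil
}

For namespaced types the pattern chains once more, e.g. NewEgressNetworkPolicyLister(indexer).EgressNetworkPolicies("some-namespace").Get("name").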
+ +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.oauth.v1.ClusterRoleScopeRestriction + map: + fields: + - name: allowEscalation + type: + scalar: boolean + default: false + - name: namespaces + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: roleNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.oauth.v1.OAuthAccessToken + map: + fields: + - name: apiVersion + type: + scalar: string + - name: authorizeToken + type: + scalar: string + - name: clientName + type: + scalar: string + - name: expiresIn + type: + scalar: numeric + - name: inactivityTimeoutSeconds + type: + scalar: numeric + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: redirectURI + type: + scalar: string + - name: refreshToken + type: + scalar: string + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: userName + type: + scalar: string + - name: userUID + type: + scalar: string +- name: com.github.openshift.api.oauth.v1.OAuthAuthorizeToken + map: + fields: + - name: apiVersion + type: + scalar: string + - name: clientName + type: + scalar: string + - name: codeChallenge + type: + scalar: string + - name: codeChallengeMethod + type: + scalar: string + - name: expiresIn + type: + scalar: numeric + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: redirectURI + type: + scalar: string + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: state + type: + scalar: string + - name: userName + type: + scalar: string + - name: userUID + type: + scalar: string +- name: com.github.openshift.api.oauth.v1.OAuthClient + map: + fields: + - name: accessTokenInactivityTimeoutSeconds + type: + scalar: numeric + - name: accessTokenMaxAgeSeconds + type: + scalar: numeric + - name: additionalSecrets + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: apiVersion + type: + scalar: string + - name: grantMethod + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: redirectURIs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: respondWithChallenges + type: + scalar: boolean + - name: scopeRestrictions + type: + list: + elementType: + namedType: com.github.openshift.api.oauth.v1.ScopeRestriction + elementRelationship: atomic + - name: secret + type: + scalar: string +- name: com.github.openshift.api.oauth.v1.OAuthClientAuthorization + map: + fields: + - name: apiVersion + type: + scalar: string + - name: clientName + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: scopes + type: + list: + elementType: + scalar: string 
+ elementRelationship: atomic + - name: userName + type: + scalar: string + - name: userUID + type: + scalar: string +- name: com.github.openshift.api.oauth.v1.ScopeRestriction + map: + fields: + - name: clusterRole + type: + namedType: com.github.openshift.api.oauth.v1.ClusterRoleScopeRestriction + - name: literals + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.oauth.v1.UserOAuthAccessToken + map: + fields: + - name: apiVersion + type: + scalar: string + - name: authorizeToken + type: + scalar: string + - name: clientName + type: + scalar: string + - name: expiresIn + type: + scalar: numeric + - name: inactivityTimeoutSeconds + type: + scalar: numeric + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: redirectURI + type: + scalar: string + - name: refreshToken + type: + scalar: string + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: userName + type: + scalar: string + - name: userUID + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/clusterrolescoperestriction.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/clusterrolescoperestriction.go new file mode 100644 index 0000000000000..692ef70516f27 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/clusterrolescoperestriction.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterRoleScopeRestrictionApplyConfiguration represents a declarative configuration of the ClusterRoleScopeRestriction type for use +// with apply. +type ClusterRoleScopeRestrictionApplyConfiguration struct { + RoleNames []string `json:"roleNames,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` + AllowEscalation *bool `json:"allowEscalation,omitempty"` +} + +// ClusterRoleScopeRestrictionApplyConfiguration constructs a declarative configuration of the ClusterRoleScopeRestriction type for use with +// apply. +func ClusterRoleScopeRestriction() *ClusterRoleScopeRestrictionApplyConfiguration { + return &ClusterRoleScopeRestrictionApplyConfiguration{} +} + +// WithRoleNames adds the given value to the RoleNames field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RoleNames field. +func (b *ClusterRoleScopeRestrictionApplyConfiguration) WithRoleNames(values ...string) *ClusterRoleScopeRestrictionApplyConfiguration { + for i := range values { + b.RoleNames = append(b.RoleNames, values[i]) + } + return b +} + +// WithNamespaces adds the given value to the Namespaces field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Namespaces field. +func (b *ClusterRoleScopeRestrictionApplyConfiguration) WithNamespaces(values ...string) *ClusterRoleScopeRestrictionApplyConfiguration { + for i := range values { + b.Namespaces = append(b.Namespaces, values[i]) + } + return b +} + +// WithAllowEscalation sets the AllowEscalation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowEscalation field is set to the value of the last call.
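[Editor's note] The With* builders above mutate the receiver and return it, so an apply configuration composes in a single expression. A minimal sketch using only the API shown in this diff (the values are illustrative):

package applyexample

import (
	oauthapplyv1 "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1"
)

// scopeRestriction composes a ClusterRoleScopeRestriction apply configuration
// by chaining the generated With* builders.
var scopeRestriction = oauthapplyv1.ClusterRoleScopeRestriction().
	WithRoleNames("admin", "edit").
	WithNamespaces("team-a").
	WithAllowEscalation(false)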
+func (b *ClusterRoleScopeRestrictionApplyConfiguration) WithAllowEscalation(value bool) *ClusterRoleScopeRestrictionApplyConfiguration { + b.AllowEscalation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthaccesstoken.go new file mode 100644 index 0000000000000..959f5cd2abadb --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthaccesstoken.go @@ -0,0 +1,311 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + internal "github.com/openshift/client-go/oauth/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OAuthAccessTokenApplyConfiguration represents a declarative configuration of the OAuthAccessToken type for use +// with apply. +type OAuthAccessTokenApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + ClientName *string `json:"clientName,omitempty"` + ExpiresIn *int64 `json:"expiresIn,omitempty"` + Scopes []string `json:"scopes,omitempty"` + RedirectURI *string `json:"redirectURI,omitempty"` + UserName *string `json:"userName,omitempty"` + UserUID *string `json:"userUID,omitempty"` + AuthorizeToken *string `json:"authorizeToken,omitempty"` + RefreshToken *string `json:"refreshToken,omitempty"` + InactivityTimeoutSeconds *int32 `json:"inactivityTimeoutSeconds,omitempty"` +} + +// OAuthAccessToken constructs a declarative configuration of the OAuthAccessToken type for use with +// apply. +func OAuthAccessToken(name string) *OAuthAccessTokenApplyConfiguration { + b := &OAuthAccessTokenApplyConfiguration{} + b.WithName(name) + b.WithKind("OAuthAccessToken") + b.WithAPIVersion("oauth.openshift.io/v1") + return b +} + +// ExtractOAuthAccessToken extracts the applied configuration owned by fieldManager from +// oAuthAccessToken. If no managedFields are found in oAuthAccessToken for fieldManager, an +// OAuthAccessTokenApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// oAuthAccessToken must be an unmodified OAuthAccessToken API object that was retrieved from the Kubernetes API. +// ExtractOAuthAccessToken provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOAuthAccessToken(oAuthAccessToken *oauthv1.OAuthAccessToken, fieldManager string) (*OAuthAccessTokenApplyConfiguration, error) { + return extractOAuthAccessToken(oAuthAccessToken, fieldManager, "") +} + +// ExtractOAuthAccessTokenStatus is the same as ExtractOAuthAccessToken except +// that it extracts the status subresource applied configuration. +// Experimental!
+func ExtractOAuthAccessTokenStatus(oAuthAccessToken *oauthv1.OAuthAccessToken, fieldManager string) (*OAuthAccessTokenApplyConfiguration, error) { + return extractOAuthAccessToken(oAuthAccessToken, fieldManager, "status") +} + +func extractOAuthAccessToken(oAuthAccessToken *oauthv1.OAuthAccessToken, fieldManager string, subresource string) (*OAuthAccessTokenApplyConfiguration, error) { + b := &OAuthAccessTokenApplyConfiguration{} + err := managedfields.ExtractInto(oAuthAccessToken, internal.Parser().Type("com.github.openshift.api.oauth.v1.OAuthAccessToken"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oAuthAccessToken.Name) + + b.WithKind("OAuthAccessToken") + b.WithAPIVersion("oauth.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithKind(value string) *OAuthAccessTokenApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithAPIVersion(value string) *OAuthAccessTokenApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithName(value string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithGenerateName(value string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithNamespace(value string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *OAuthAccessTokenApplyConfiguration) WithUID(value types.UID) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithResourceVersion(value string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithGeneration(value int64) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key.
+func (b *OAuthAccessTokenApplyConfiguration) WithLabels(entries map[string]string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *OAuthAccessTokenApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OAuthAccessTokenApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *OAuthAccessTokenApplyConfiguration) WithFinalizers(values ...string) *OAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OAuthAccessTokenApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithClientName sets the ClientName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientName field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithClientName(value string) *OAuthAccessTokenApplyConfiguration { + b.ClientName = &value + return b +} + +// WithExpiresIn sets the ExpiresIn field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExpiresIn field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithExpiresIn(value int64) *OAuthAccessTokenApplyConfiguration { + b.ExpiresIn = &value + return b +} + +// WithScopes adds the given value to the Scopes field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Scopes field. +func (b *OAuthAccessTokenApplyConfiguration) WithScopes(values ...string) *OAuthAccessTokenApplyConfiguration { + for i := range values { + b.Scopes = append(b.Scopes, values[i]) + } + return b +} + +// WithRedirectURI sets the RedirectURI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RedirectURI field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithRedirectURI(value string) *OAuthAccessTokenApplyConfiguration { + b.RedirectURI = &value + return b +} + +// WithUserName sets the UserName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserName field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithUserName(value string) *OAuthAccessTokenApplyConfiguration { + b.UserName = &value + return b +} + +// WithUserUID sets the UserUID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserUID field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithUserUID(value string) *OAuthAccessTokenApplyConfiguration { + b.UserUID = &value + return b +} + +// WithAuthorizeToken sets the AuthorizeToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AuthorizeToken field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithAuthorizeToken(value string) *OAuthAccessTokenApplyConfiguration { + b.AuthorizeToken = &value + return b +} + +// WithRefreshToken sets the RefreshToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RefreshToken field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithRefreshToken(value string) *OAuthAccessTokenApplyConfiguration { + b.RefreshToken = &value + return b +} + +// WithInactivityTimeoutSeconds sets the InactivityTimeoutSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InactivityTimeoutSeconds field is set to the value of the last call. +func (b *OAuthAccessTokenApplyConfiguration) WithInactivityTimeoutSeconds(value int32) *OAuthAccessTokenApplyConfiguration { + b.InactivityTimeoutSeconds = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *OAuthAccessTokenApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthauthorizetoken.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthauthorizetoken.go
new file mode 100644
index 0000000000000..3ec75e9a22eeb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthauthorizetoken.go
@@ -0,0 +1,311 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	oauthv1 "github.com/openshift/api/oauth/v1"
+	internal "github.com/openshift/client-go/oauth/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OAuthAuthorizeTokenApplyConfiguration represents a declarative configuration of the OAuthAuthorizeToken type for use
+// with apply.
+type OAuthAuthorizeTokenApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	ClientName          *string  `json:"clientName,omitempty"`
+	ExpiresIn           *int64   `json:"expiresIn,omitempty"`
+	Scopes              []string `json:"scopes,omitempty"`
+	RedirectURI         *string  `json:"redirectURI,omitempty"`
+	State               *string  `json:"state,omitempty"`
+	UserName            *string  `json:"userName,omitempty"`
+	UserUID             *string  `json:"userUID,omitempty"`
+	CodeChallenge       *string  `json:"codeChallenge,omitempty"`
+	CodeChallengeMethod *string  `json:"codeChallengeMethod,omitempty"`
+}
+
+// OAuthAuthorizeToken constructs a declarative configuration of the OAuthAuthorizeToken type for use with
+// apply.
+func OAuthAuthorizeToken(name string) *OAuthAuthorizeTokenApplyConfiguration {
+	b := &OAuthAuthorizeTokenApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("OAuthAuthorizeToken")
+	b.WithAPIVersion("oauth.openshift.io/v1")
+	return b
+}
+
+// ExtractOAuthAuthorizeToken extracts the applied configuration owned by fieldManager from
+// oAuthAuthorizeToken. If no managedFields are found in oAuthAuthorizeToken for fieldManager, an
+// OAuthAuthorizeTokenApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// oAuthAuthorizeToken must be an unmodified OAuthAuthorizeToken API object that was retrieved from the Kubernetes API.
+// ExtractOAuthAuthorizeToken provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOAuthAuthorizeToken(oAuthAuthorizeToken *oauthv1.OAuthAuthorizeToken, fieldManager string) (*OAuthAuthorizeTokenApplyConfiguration, error) {
+	return extractOAuthAuthorizeToken(oAuthAuthorizeToken, fieldManager, "")
+}
+
+// ExtractOAuthAuthorizeTokenStatus is the same as ExtractOAuthAuthorizeToken except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOAuthAuthorizeTokenStatus(oAuthAuthorizeToken *oauthv1.OAuthAuthorizeToken, fieldManager string) (*OAuthAuthorizeTokenApplyConfiguration, error) { + return extractOAuthAuthorizeToken(oAuthAuthorizeToken, fieldManager, "status") +} + +func extractOAuthAuthorizeToken(oAuthAuthorizeToken *oauthv1.OAuthAuthorizeToken, fieldManager string, subresource string) (*OAuthAuthorizeTokenApplyConfiguration, error) { + b := &OAuthAuthorizeTokenApplyConfiguration{} + err := managedfields.ExtractInto(oAuthAuthorizeToken, internal.Parser().Type("com.github.openshift.api.oauth.v1.OAuthAuthorizeToken"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oAuthAuthorizeToken.Name) + + b.WithKind("OAuthAuthorizeToken") + b.WithAPIVersion("oauth.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OAuthAuthorizeTokenApplyConfiguration) WithKind(value string) *OAuthAuthorizeTokenApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OAuthAuthorizeTokenApplyConfiguration) WithAPIVersion(value string) *OAuthAuthorizeTokenApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OAuthAuthorizeTokenApplyConfiguration) WithName(value string) *OAuthAuthorizeTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OAuthAuthorizeTokenApplyConfiguration) WithGenerateName(value string) *OAuthAuthorizeTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OAuthAuthorizeTokenApplyConfiguration) WithNamespace(value string) *OAuthAuthorizeTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithUID(value types.UID) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithResourceVersion(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithGeneration(value int64) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithLabels(entries map[string]string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithFinalizers(values ...string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *OAuthAuthorizeTokenApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithClientName sets the ClientName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientName field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithClientName(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ClientName = &value
+	return b
+}
+
+// WithExpiresIn sets the ExpiresIn field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExpiresIn field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithExpiresIn(value int64) *OAuthAuthorizeTokenApplyConfiguration {
+	b.ExpiresIn = &value
+	return b
+}
+
+// WithScopes adds the given value to the Scopes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Scopes field.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithScopes(values ...string) *OAuthAuthorizeTokenApplyConfiguration {
+	for i := range values {
+		b.Scopes = append(b.Scopes, values[i])
+	}
+	return b
+}
+
+// WithRedirectURI sets the RedirectURI field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RedirectURI field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithRedirectURI(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.RedirectURI = &value
+	return b
+}
+
+// WithState sets the State field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the State field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithState(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.State = &value
+	return b
+}
+
+// WithUserName sets the UserName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserName field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithUserName(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.UserName = &value
+	return b
+}
+
+// WithUserUID sets the UserUID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserUID field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithUserUID(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.UserUID = &value
+	return b
+}
+
+// WithCodeChallenge sets the CodeChallenge field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CodeChallenge field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithCodeChallenge(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.CodeChallenge = &value
+	return b
+}
+
+// WithCodeChallengeMethod sets the CodeChallengeMethod field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CodeChallengeMethod field is set to the value of the last call.
+func (b *OAuthAuthorizeTokenApplyConfiguration) WithCodeChallengeMethod(value string) *OAuthAuthorizeTokenApplyConfiguration {
+	b.CodeChallengeMethod = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *OAuthAuthorizeTokenApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthclient.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthclient.go
new file mode 100644
index 0000000000000..b1f851543f016
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthclient.go
@@ -0,0 +1,309 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	oauthv1 "github.com/openshift/api/oauth/v1"
+	internal "github.com/openshift/client-go/oauth/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OAuthClientApplyConfiguration represents a declarative configuration of the OAuthClient type for use
+// with apply.
+type OAuthClientApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Secret                              *string                              `json:"secret,omitempty"`
+	AdditionalSecrets                   []string                             `json:"additionalSecrets,omitempty"`
+	RespondWithChallenges               *bool                                `json:"respondWithChallenges,omitempty"`
+	RedirectURIs                        []string                             `json:"redirectURIs,omitempty"`
+	GrantMethod                         *oauthv1.GrantHandlerType            `json:"grantMethod,omitempty"`
+	ScopeRestrictions                   []ScopeRestrictionApplyConfiguration `json:"scopeRestrictions,omitempty"`
+	AccessTokenMaxAgeSeconds            *int32                               `json:"accessTokenMaxAgeSeconds,omitempty"`
+	AccessTokenInactivityTimeoutSeconds *int32                               `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+}
+
+// OAuthClient constructs a declarative configuration of the OAuthClient type for use with
+// apply.
+func OAuthClient(name string) *OAuthClientApplyConfiguration {
+	b := &OAuthClientApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("OAuthClient")
+	b.WithAPIVersion("oauth.openshift.io/v1")
+	return b
+}
+
+// ExtractOAuthClient extracts the applied configuration owned by fieldManager from
+// oAuthClient. If no managedFields are found in oAuthClient for fieldManager, an
+// OAuthClientApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// oAuthClient must be an unmodified OAuthClient API object that was retrieved from the Kubernetes API.
+// ExtractOAuthClient provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOAuthClient(oAuthClient *oauthv1.OAuthClient, fieldManager string) (*OAuthClientApplyConfiguration, error) {
+	return extractOAuthClient(oAuthClient, fieldManager, "")
+}
+
+// ExtractOAuthClientStatus is the same as ExtractOAuthClient except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOAuthClientStatus(oAuthClient *oauthv1.OAuthClient, fieldManager string) (*OAuthClientApplyConfiguration, error) { + return extractOAuthClient(oAuthClient, fieldManager, "status") +} + +func extractOAuthClient(oAuthClient *oauthv1.OAuthClient, fieldManager string, subresource string) (*OAuthClientApplyConfiguration, error) { + b := &OAuthClientApplyConfiguration{} + err := managedfields.ExtractInto(oAuthClient, internal.Parser().Type("com.github.openshift.api.oauth.v1.OAuthClient"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oAuthClient.Name) + + b.WithKind("OAuthClient") + b.WithAPIVersion("oauth.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OAuthClientApplyConfiguration) WithKind(value string) *OAuthClientApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OAuthClientApplyConfiguration) WithAPIVersion(value string) *OAuthClientApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OAuthClientApplyConfiguration) WithName(value string) *OAuthClientApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OAuthClientApplyConfiguration) WithGenerateName(value string) *OAuthClientApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OAuthClientApplyConfiguration) WithNamespace(value string) *OAuthClientApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *OAuthClientApplyConfiguration) WithUID(value types.UID) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.UID = &value
+	return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithResourceVersion(value string) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+	return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithGeneration(value int64) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *OAuthClientApplyConfiguration) WithLabels(entries map[string]string) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *OAuthClientApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *OAuthClientApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *OAuthClientApplyConfiguration) WithFinalizers(values ...string) *OAuthClientApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *OAuthClientApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSecret sets the Secret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Secret field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithSecret(value string) *OAuthClientApplyConfiguration {
+	b.Secret = &value
+	return b
+}
+
+// WithAdditionalSecrets adds the given value to the AdditionalSecrets field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AdditionalSecrets field.
+func (b *OAuthClientApplyConfiguration) WithAdditionalSecrets(values ...string) *OAuthClientApplyConfiguration {
+	for i := range values {
+		b.AdditionalSecrets = append(b.AdditionalSecrets, values[i])
+	}
+	return b
+}
+
+// WithRespondWithChallenges sets the RespondWithChallenges field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RespondWithChallenges field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithRespondWithChallenges(value bool) *OAuthClientApplyConfiguration {
+	b.RespondWithChallenges = &value
+	return b
+}
+
+// WithRedirectURIs adds the given value to the RedirectURIs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RedirectURIs field.
+func (b *OAuthClientApplyConfiguration) WithRedirectURIs(values ...string) *OAuthClientApplyConfiguration {
+	for i := range values {
+		b.RedirectURIs = append(b.RedirectURIs, values[i])
+	}
+	return b
+}
+
+// WithGrantMethod sets the GrantMethod field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GrantMethod field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithGrantMethod(value oauthv1.GrantHandlerType) *OAuthClientApplyConfiguration {
+	b.GrantMethod = &value
+	return b
+}
+
+// WithScopeRestrictions adds the given value to the ScopeRestrictions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ScopeRestrictions field.
+func (b *OAuthClientApplyConfiguration) WithScopeRestrictions(values ...*ScopeRestrictionApplyConfiguration) *OAuthClientApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithScopeRestrictions")
+		}
+		b.ScopeRestrictions = append(b.ScopeRestrictions, *values[i])
+	}
+	return b
+}
+
+// WithAccessTokenMaxAgeSeconds sets the AccessTokenMaxAgeSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AccessTokenMaxAgeSeconds field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithAccessTokenMaxAgeSeconds(value int32) *OAuthClientApplyConfiguration {
+	b.AccessTokenMaxAgeSeconds = &value
+	return b
+}
+
+// WithAccessTokenInactivityTimeoutSeconds sets the AccessTokenInactivityTimeoutSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AccessTokenInactivityTimeoutSeconds field is set to the value of the last call.
+func (b *OAuthClientApplyConfiguration) WithAccessTokenInactivityTimeoutSeconds(value int32) *OAuthClientApplyConfiguration {
+	b.AccessTokenInactivityTimeoutSeconds = &value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *OAuthClientApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
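Taken together, these setters support fluent construction of a complete OAuthClient apply configuration. A minimal usage sketch, assuming the generated clientset at github.com/openshift/client-go/oauth/clientset/versioned exposes the usual typed Apply method; the kubeconfig path, client name, field manager, and field values are all illustrative:

package main

import (
	"context"

	oauthv1 "github.com/openshift/api/oauth/v1"
	oauthapply "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1"
	oauthclient "github.com/openshift/client-go/oauth/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative client construction; error handling elided for brevity.
	config, _ := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	clientset, _ := oauthclient.NewForConfig(config)

	// Chained "With" calls build up the declarative configuration.
	client := oauthapply.OAuthClient("my-cli").
		WithGrantMethod(oauthv1.GrantHandlerAuto).
		WithRedirectURIs("https://example.com/callback").
		WithScopeRestrictions(oauthapply.ScopeRestriction().
			WithExactValues("user:info")).
		WithAccessTokenMaxAgeSeconds(3600)

	// Server-side apply with a dedicated field manager; OAuthClients are cluster-scoped.
	_, _ = clientset.OauthV1().OAuthClients().Apply(
		context.TODO(), client, metav1.ApplyOptions{FieldManager: "my-cli-operator"})
}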
diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthclientauthorization.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthclientauthorization.go
new file mode 100644
index 0000000000000..838cd4b307246
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/oauthclientauthorization.go
@@ -0,0 +1,266 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	oauthv1 "github.com/openshift/api/oauth/v1"
+	internal "github.com/openshift/client-go/oauth/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OAuthClientAuthorizationApplyConfiguration represents a declarative configuration of the OAuthClientAuthorization type for use
+// with apply.
+type OAuthClientAuthorizationApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	ClientName *string  `json:"clientName,omitempty"`
+	UserName   *string  `json:"userName,omitempty"`
+	UserUID    *string  `json:"userUID,omitempty"`
+	Scopes     []string `json:"scopes,omitempty"`
+}
+
+// OAuthClientAuthorization constructs a declarative configuration of the OAuthClientAuthorization type for use with
+// apply.
+func OAuthClientAuthorization(name string) *OAuthClientAuthorizationApplyConfiguration {
+	b := &OAuthClientAuthorizationApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("OAuthClientAuthorization")
+	b.WithAPIVersion("oauth.openshift.io/v1")
+	return b
+}
+
+// ExtractOAuthClientAuthorization extracts the applied configuration owned by fieldManager from
+// oAuthClientAuthorization. If no managedFields are found in oAuthClientAuthorization for fieldManager, an
+// OAuthClientAuthorizationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// oAuthClientAuthorization must be an unmodified OAuthClientAuthorization API object that was retrieved from the Kubernetes API.
+// ExtractOAuthClientAuthorization provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOAuthClientAuthorization(oAuthClientAuthorization *oauthv1.OAuthClientAuthorization, fieldManager string) (*OAuthClientAuthorizationApplyConfiguration, error) {
+	return extractOAuthClientAuthorization(oAuthClientAuthorization, fieldManager, "")
+}
+
+// ExtractOAuthClientAuthorizationStatus is the same as ExtractOAuthClientAuthorization except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOAuthClientAuthorizationStatus(oAuthClientAuthorization *oauthv1.OAuthClientAuthorization, fieldManager string) (*OAuthClientAuthorizationApplyConfiguration, error) { + return extractOAuthClientAuthorization(oAuthClientAuthorization, fieldManager, "status") +} + +func extractOAuthClientAuthorization(oAuthClientAuthorization *oauthv1.OAuthClientAuthorization, fieldManager string, subresource string) (*OAuthClientAuthorizationApplyConfiguration, error) { + b := &OAuthClientAuthorizationApplyConfiguration{} + err := managedfields.ExtractInto(oAuthClientAuthorization, internal.Parser().Type("com.github.openshift.api.oauth.v1.OAuthClientAuthorization"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oAuthClientAuthorization.Name) + + b.WithKind("OAuthClientAuthorization") + b.WithAPIVersion("oauth.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithKind(value string) *OAuthClientAuthorizationApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithAPIVersion(value string) *OAuthClientAuthorizationApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithName(value string) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithGenerateName(value string) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *OAuthClientAuthorizationApplyConfiguration) WithNamespace(value string) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithUID(value types.UID) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithResourceVersion(value string) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithGeneration(value int64) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OAuthClientAuthorizationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OAuthClientAuthorizationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *OAuthClientAuthorizationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthClientAuthorizationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithLabels(entries map[string]string) *OAuthClientAuthorizationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthClientAuthorizationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OAuthClientAuthorizationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithFinalizers(values ...string) *OAuthClientAuthorizationApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *OAuthClientAuthorizationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithClientName sets the ClientName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientName field is set to the value of the last call.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithClientName(value string) *OAuthClientAuthorizationApplyConfiguration {
+	b.ClientName = &value
+	return b
+}
+
+// WithUserName sets the UserName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserName field is set to the value of the last call.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithUserName(value string) *OAuthClientAuthorizationApplyConfiguration {
+	b.UserName = &value
+	return b
+}
+
+// WithUserUID sets the UserUID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserUID field is set to the value of the last call.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithUserUID(value string) *OAuthClientAuthorizationApplyConfiguration {
+	b.UserUID = &value
+	return b
+}
+
+// WithScopes adds the given value to the Scopes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Scopes field.
+func (b *OAuthClientAuthorizationApplyConfiguration) WithScopes(values ...string) *OAuthClientAuthorizationApplyConfiguration {
+	for i := range values {
+		b.Scopes = append(b.Scopes, values[i])
+	}
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *OAuthClientAuthorizationApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/scoperestriction.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/scoperestriction.go
new file mode 100644
index 0000000000000..bbeb90273f338
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/scoperestriction.go
@@ -0,0 +1,34 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ScopeRestrictionApplyConfiguration represents a declarative configuration of the ScopeRestriction type for use
+// with apply.
+type ScopeRestrictionApplyConfiguration struct {
+	ExactValues []string                                       `json:"literals,omitempty"`
+	ClusterRole *ClusterRoleScopeRestrictionApplyConfiguration `json:"clusterRole,omitempty"`
+}
+
+// ScopeRestrictionApplyConfiguration constructs a declarative configuration of the ScopeRestriction type for use with
+// apply.
+func ScopeRestriction() *ScopeRestrictionApplyConfiguration {
+	return &ScopeRestrictionApplyConfiguration{}
+}
+
+// WithExactValues adds the given value to the ExactValues field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExactValues field.
+func (b *ScopeRestrictionApplyConfiguration) WithExactValues(values ...string) *ScopeRestrictionApplyConfiguration {
+	for i := range values {
+		b.ExactValues = append(b.ExactValues, values[i])
+	}
+	return b
+}
+
+// WithClusterRole sets the ClusterRole field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterRole field is set to the value of the last call.
+func (b *ScopeRestrictionApplyConfiguration) WithClusterRole(value *ClusterRoleScopeRestrictionApplyConfiguration) *ScopeRestrictionApplyConfiguration {
+	b.ClusterRole = value
+	return b
+}
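The Extract helpers in these files support the extract/modify-in-place/apply round trip described in their doc comments: read the live object, recover only the fields a given manager owns, tweak them, and re-apply. A minimal sketch using ExtractOAuthClient, under the same generated-clientset assumption as the earlier example (object name and field manager are illustrative):

package main

import (
	"context"

	oauthapply "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1"
	oauthclient "github.com/openshift/client-go/oauth/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func reapply(config *rest.Config) error {
	clientset, err := oauthclient.NewForConfig(config)
	if err != nil {
		return err
	}

	// Read the live object, then extract only the fields this manager owns.
	live, err := clientset.OauthV1().OAuthClients().Get(context.TODO(), "my-cli", metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := oauthapply.ExtractOAuthClient(live, "my-cli-operator")
	if err != nil {
		return err
	}

	// Modify in place and apply; fields owned by other managers are left untouched.
	cfg.WithAccessTokenMaxAgeSeconds(7200)
	_, err = clientset.OauthV1().OAuthClients().Apply(
		context.TODO(), cfg, metav1.ApplyOptions{FieldManager: "my-cli-operator", Force: true})
	return err
}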
diff --git a/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/useroauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/useroauthaccesstoken.go
new file mode 100644
index 0000000000000..d689856efc736
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1/useroauthaccesstoken.go
@@ -0,0 +1,311 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	oauthv1 "github.com/openshift/api/oauth/v1"
+	internal "github.com/openshift/client-go/oauth/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// UserOAuthAccessTokenApplyConfiguration represents a declarative configuration of the UserOAuthAccessToken type for use
+// with apply.
+type UserOAuthAccessTokenApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	ClientName               *string  `json:"clientName,omitempty"`
+	ExpiresIn                *int64   `json:"expiresIn,omitempty"`
+	Scopes                   []string `json:"scopes,omitempty"`
+	RedirectURI              *string  `json:"redirectURI,omitempty"`
+	UserName                 *string  `json:"userName,omitempty"`
+	UserUID                  *string  `json:"userUID,omitempty"`
+	AuthorizeToken           *string  `json:"authorizeToken,omitempty"`
+	RefreshToken             *string  `json:"refreshToken,omitempty"`
+	InactivityTimeoutSeconds *int32   `json:"inactivityTimeoutSeconds,omitempty"`
+}
+
+// UserOAuthAccessToken constructs a declarative configuration of the UserOAuthAccessToken type for use with
+// apply.
+func UserOAuthAccessToken(name string) *UserOAuthAccessTokenApplyConfiguration {
+	b := &UserOAuthAccessTokenApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("UserOAuthAccessToken")
+	b.WithAPIVersion("oauth.openshift.io/v1")
+	return b
+}
+
+// ExtractUserOAuthAccessToken extracts the applied configuration owned by fieldManager from
+// userOAuthAccessToken. If no managedFields are found in userOAuthAccessToken for fieldManager, a
+// UserOAuthAccessTokenApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// userOAuthAccessToken must be an unmodified UserOAuthAccessToken API object that was retrieved from the Kubernetes API.
+// ExtractUserOAuthAccessToken provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractUserOAuthAccessToken(userOAuthAccessToken *oauthv1.UserOAuthAccessToken, fieldManager string) (*UserOAuthAccessTokenApplyConfiguration, error) {
+	return extractUserOAuthAccessToken(userOAuthAccessToken, fieldManager, "")
+}
+
+// ExtractUserOAuthAccessTokenStatus is the same as ExtractUserOAuthAccessToken except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractUserOAuthAccessTokenStatus(userOAuthAccessToken *oauthv1.UserOAuthAccessToken, fieldManager string) (*UserOAuthAccessTokenApplyConfiguration, error) {
+	return extractUserOAuthAccessToken(userOAuthAccessToken, fieldManager, "status")
+}
+
+func extractUserOAuthAccessToken(userOAuthAccessToken *oauthv1.UserOAuthAccessToken, fieldManager string, subresource string) (*UserOAuthAccessTokenApplyConfiguration, error) {
+	b := &UserOAuthAccessTokenApplyConfiguration{}
+	err := managedfields.ExtractInto(userOAuthAccessToken, internal.Parser().Type("com.github.openshift.api.oauth.v1.UserOAuthAccessToken"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(userOAuthAccessToken.Name)
+
+	b.WithKind("UserOAuthAccessToken")
+	b.WithAPIVersion("oauth.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithKind(value string) *UserOAuthAccessTokenApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithAPIVersion(value string) *UserOAuthAccessTokenApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithName(value string) *UserOAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithGenerateName(value string) *UserOAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithNamespace(value string) *UserOAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithUID(value types.UID) *UserOAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithResourceVersion(value string) *UserOAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithGeneration(value int64) *UserOAuthAccessTokenApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *UserOAuthAccessTokenApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithLabels(entries map[string]string) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithAnnotations(entries map[string]string) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithFinalizers(values ...string) *UserOAuthAccessTokenApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *UserOAuthAccessTokenApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithClientName sets the ClientName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientName field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithClientName(value string) *UserOAuthAccessTokenApplyConfiguration {
+	b.ClientName = &value
+	return b
+}
+
+// WithExpiresIn sets the ExpiresIn field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExpiresIn field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithExpiresIn(value int64) *UserOAuthAccessTokenApplyConfiguration {
+	b.ExpiresIn = &value
+	return b
+}
+
+// WithScopes adds the given value to the Scopes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Scopes field.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithScopes(values ...string) *UserOAuthAccessTokenApplyConfiguration {
+	for i := range values {
+		b.Scopes = append(b.Scopes, values[i])
+	}
+	return b
+}
+
+// WithRedirectURI sets the RedirectURI field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RedirectURI field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithRedirectURI(value string) *UserOAuthAccessTokenApplyConfiguration {
+	b.RedirectURI = &value
+	return b
+}
+
+// WithUserName sets the UserName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UserName field is set to the value of the last call.
+func (b *UserOAuthAccessTokenApplyConfiguration) WithUserName(value string) *UserOAuthAccessTokenApplyConfiguration { + b.UserName = &value + return b +} + +// WithUserUID sets the UserUID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserUID field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithUserUID(value string) *UserOAuthAccessTokenApplyConfiguration { + b.UserUID = &value + return b +} + +// WithAuthorizeToken sets the AuthorizeToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AuthorizeToken field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithAuthorizeToken(value string) *UserOAuthAccessTokenApplyConfiguration { + b.AuthorizeToken = &value + return b +} + +// WithRefreshToken sets the RefreshToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RefreshToken field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithRefreshToken(value string) *UserOAuthAccessTokenApplyConfiguration { + b.RefreshToken = &value + return b +} + +// WithInactivityTimeoutSeconds sets the InactivityTimeoutSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InactivityTimeoutSeconds field is set to the value of the last call. +func (b *UserOAuthAccessTokenApplyConfiguration) WithInactivityTimeoutSeconds(value int32) *UserOAuthAccessTokenApplyConfiguration { + b.InactivityTimeoutSeconds = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *UserOAuthAccessTokenApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..e89fa3fc0d668 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + oauthv1 "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + OauthV1() oauthv1.OauthV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + oauthV1 *oauthv1.OauthV1Client +} + +// OauthV1 retrieves the OauthV1Client +func (c *Clientset) OauthV1() oauthv1.OauthV1Interface { + return c.oauthV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.oauthV1, err = oauthv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.oauthV1 = oauthv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..089f54e34f9c9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + oauthv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/doc.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/generated_expansion.go new file mode 100644 index 0000000000000..accf8a50443c6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/generated_expansion.go @@ -0,0 +1,13 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type OAuthAccessTokenExpansion interface{} + +type OAuthAuthorizeTokenExpansion interface{} + +type OAuthClientExpansion interface{} + +type OAuthClientAuthorizationExpansion interface{} + +type UserOAuthAccessTokenExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauth_client.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauth_client.go new file mode 100644 index 0000000000000..d666ada2c1332 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauth_client.go @@ -0,0 +1,111 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + oauthv1 "github.com/openshift/api/oauth/v1" + scheme "github.com/openshift/client-go/oauth/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type OauthV1Interface interface { + RESTClient() rest.Interface + OAuthAccessTokensGetter + OAuthAuthorizeTokensGetter + OAuthClientsGetter + OAuthClientAuthorizationsGetter + UserOAuthAccessTokensGetter +} + +// OauthV1Client is used to interact with features provided by the oauth.openshift.io group. 
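+//
+// A minimal usage sketch (editor's illustration, not generator output); cfg
+// and ctx are assumed to exist, e.g. from clientcmd and context.Background():
+//
+//	client, err := NewForConfig(cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	clients, err := client.OAuthClients().List(ctx, metav1.ListOptions{})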
+type OauthV1Client struct { + restClient rest.Interface +} + +func (c *OauthV1Client) OAuthAccessTokens() OAuthAccessTokenInterface { + return newOAuthAccessTokens(c) +} + +func (c *OauthV1Client) OAuthAuthorizeTokens() OAuthAuthorizeTokenInterface { + return newOAuthAuthorizeTokens(c) +} + +func (c *OauthV1Client) OAuthClients() OAuthClientInterface { + return newOAuthClients(c) +} + +func (c *OauthV1Client) OAuthClientAuthorizations() OAuthClientAuthorizationInterface { + return newOAuthClientAuthorizations(c) +} + +func (c *OauthV1Client) UserOAuthAccessTokens() UserOAuthAccessTokenInterface { + return newUserOAuthAccessTokens(c) +} + +// NewForConfig creates a new OauthV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*OauthV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new OauthV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*OauthV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &OauthV1Client{client}, nil +} + +// NewForConfigOrDie creates a new OauthV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *OauthV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new OauthV1Client for the given RESTClient. +func New(c rest.Interface) *OauthV1Client { + return &OauthV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := oauthv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *OauthV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthaccesstoken.go new file mode 100644 index 0000000000000..f80b391c2fdfb --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthaccesstoken.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + oauthv1 "github.com/openshift/api/oauth/v1" + applyconfigurationsoauthv1 "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1" + scheme "github.com/openshift/client-go/oauth/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OAuthAccessTokensGetter has a method to return a OAuthAccessTokenInterface. +// A group's client should implement this interface. +type OAuthAccessTokensGetter interface { + OAuthAccessTokens() OAuthAccessTokenInterface +} + +// OAuthAccessTokenInterface has methods to work with OAuthAccessToken resources. +type OAuthAccessTokenInterface interface { + Create(ctx context.Context, oAuthAccessToken *oauthv1.OAuthAccessToken, opts metav1.CreateOptions) (*oauthv1.OAuthAccessToken, error) + Update(ctx context.Context, oAuthAccessToken *oauthv1.OAuthAccessToken, opts metav1.UpdateOptions) (*oauthv1.OAuthAccessToken, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*oauthv1.OAuthAccessToken, error) + List(ctx context.Context, opts metav1.ListOptions) (*oauthv1.OAuthAccessTokenList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *oauthv1.OAuthAccessToken, err error) + Apply(ctx context.Context, oAuthAccessToken *applyconfigurationsoauthv1.OAuthAccessTokenApplyConfiguration, opts metav1.ApplyOptions) (result *oauthv1.OAuthAccessToken, err error) + OAuthAccessTokenExpansion +} + +// oAuthAccessTokens implements OAuthAccessTokenInterface +type oAuthAccessTokens struct { + *gentype.ClientWithListAndApply[*oauthv1.OAuthAccessToken, *oauthv1.OAuthAccessTokenList, *applyconfigurationsoauthv1.OAuthAccessTokenApplyConfiguration] +} + +// newOAuthAccessTokens returns a OAuthAccessTokens +func newOAuthAccessTokens(c *OauthV1Client) *oAuthAccessTokens { + return &oAuthAccessTokens{ + gentype.NewClientWithListAndApply[*oauthv1.OAuthAccessToken, *oauthv1.OAuthAccessTokenList, *applyconfigurationsoauthv1.OAuthAccessTokenApplyConfiguration]( + "oauthaccesstokens", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *oauthv1.OAuthAccessToken { return &oauthv1.OAuthAccessToken{} }, + func() *oauthv1.OAuthAccessTokenList { return &oauthv1.OAuthAccessTokenList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthauthorizetoken.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthauthorizetoken.go new file mode 100644 index 0000000000000..cf4b67ee3a6ba --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthauthorizetoken.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + oauthv1 "github.com/openshift/api/oauth/v1" + applyconfigurationsoauthv1 "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1" + scheme "github.com/openshift/client-go/oauth/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OAuthAuthorizeTokensGetter has a method to return a OAuthAuthorizeTokenInterface. +// A group's client should implement this interface. +type OAuthAuthorizeTokensGetter interface { + OAuthAuthorizeTokens() OAuthAuthorizeTokenInterface +} + +// OAuthAuthorizeTokenInterface has methods to work with OAuthAuthorizeToken resources. +type OAuthAuthorizeTokenInterface interface { + Create(ctx context.Context, oAuthAuthorizeToken *oauthv1.OAuthAuthorizeToken, opts metav1.CreateOptions) (*oauthv1.OAuthAuthorizeToken, error) + Update(ctx context.Context, oAuthAuthorizeToken *oauthv1.OAuthAuthorizeToken, opts metav1.UpdateOptions) (*oauthv1.OAuthAuthorizeToken, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*oauthv1.OAuthAuthorizeToken, error) + List(ctx context.Context, opts metav1.ListOptions) (*oauthv1.OAuthAuthorizeTokenList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *oauthv1.OAuthAuthorizeToken, err error) + Apply(ctx context.Context, oAuthAuthorizeToken *applyconfigurationsoauthv1.OAuthAuthorizeTokenApplyConfiguration, opts metav1.ApplyOptions) (result *oauthv1.OAuthAuthorizeToken, err error) + OAuthAuthorizeTokenExpansion +} + +// oAuthAuthorizeTokens implements OAuthAuthorizeTokenInterface +type oAuthAuthorizeTokens struct { + *gentype.ClientWithListAndApply[*oauthv1.OAuthAuthorizeToken, *oauthv1.OAuthAuthorizeTokenList, *applyconfigurationsoauthv1.OAuthAuthorizeTokenApplyConfiguration] +} + +// newOAuthAuthorizeTokens returns a OAuthAuthorizeTokens +func newOAuthAuthorizeTokens(c *OauthV1Client) *oAuthAuthorizeTokens { + return &oAuthAuthorizeTokens{ + gentype.NewClientWithListAndApply[*oauthv1.OAuthAuthorizeToken, *oauthv1.OAuthAuthorizeTokenList, *applyconfigurationsoauthv1.OAuthAuthorizeTokenApplyConfiguration]( + "oauthauthorizetokens", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *oauthv1.OAuthAuthorizeToken { return &oauthv1.OAuthAuthorizeToken{} }, + func() *oauthv1.OAuthAuthorizeTokenList { return &oauthv1.OAuthAuthorizeTokenList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthclient.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthclient.go new file mode 100644 index 0000000000000..c239a63382105 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthclient.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + oauthv1 "github.com/openshift/api/oauth/v1" + applyconfigurationsoauthv1 "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1" + scheme "github.com/openshift/client-go/oauth/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OAuthClientsGetter has a method to return a OAuthClientInterface. +// A group's client should implement this interface. +type OAuthClientsGetter interface { + OAuthClients() OAuthClientInterface +} + +// OAuthClientInterface has methods to work with OAuthClient resources. +type OAuthClientInterface interface { + Create(ctx context.Context, oAuthClient *oauthv1.OAuthClient, opts metav1.CreateOptions) (*oauthv1.OAuthClient, error) + Update(ctx context.Context, oAuthClient *oauthv1.OAuthClient, opts metav1.UpdateOptions) (*oauthv1.OAuthClient, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*oauthv1.OAuthClient, error) + List(ctx context.Context, opts metav1.ListOptions) (*oauthv1.OAuthClientList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *oauthv1.OAuthClient, err error) + Apply(ctx context.Context, oAuthClient *applyconfigurationsoauthv1.OAuthClientApplyConfiguration, opts metav1.ApplyOptions) (result *oauthv1.OAuthClient, err error) + OAuthClientExpansion +} + +// oAuthClients implements OAuthClientInterface +type oAuthClients struct { + *gentype.ClientWithListAndApply[*oauthv1.OAuthClient, *oauthv1.OAuthClientList, *applyconfigurationsoauthv1.OAuthClientApplyConfiguration] +} + +// newOAuthClients returns a OAuthClients +func newOAuthClients(c *OauthV1Client) *oAuthClients { + return &oAuthClients{ + gentype.NewClientWithListAndApply[*oauthv1.OAuthClient, *oauthv1.OAuthClientList, *applyconfigurationsoauthv1.OAuthClientApplyConfiguration]( + "oauthclients", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *oauthv1.OAuthClient { return &oauthv1.OAuthClient{} }, + func() *oauthv1.OAuthClientList { return &oauthv1.OAuthClientList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthclientauthorization.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthclientauthorization.go new file mode 100644 index 0000000000000..d140bc8d9eef3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/oauthclientauthorization.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + oauthv1 "github.com/openshift/api/oauth/v1" + applyconfigurationsoauthv1 "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1" + scheme "github.com/openshift/client-go/oauth/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OAuthClientAuthorizationsGetter has a method to return a OAuthClientAuthorizationInterface. 
+// A group's client should implement this interface. +type OAuthClientAuthorizationsGetter interface { + OAuthClientAuthorizations() OAuthClientAuthorizationInterface +} + +// OAuthClientAuthorizationInterface has methods to work with OAuthClientAuthorization resources. +type OAuthClientAuthorizationInterface interface { + Create(ctx context.Context, oAuthClientAuthorization *oauthv1.OAuthClientAuthorization, opts metav1.CreateOptions) (*oauthv1.OAuthClientAuthorization, error) + Update(ctx context.Context, oAuthClientAuthorization *oauthv1.OAuthClientAuthorization, opts metav1.UpdateOptions) (*oauthv1.OAuthClientAuthorization, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*oauthv1.OAuthClientAuthorization, error) + List(ctx context.Context, opts metav1.ListOptions) (*oauthv1.OAuthClientAuthorizationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *oauthv1.OAuthClientAuthorization, err error) + Apply(ctx context.Context, oAuthClientAuthorization *applyconfigurationsoauthv1.OAuthClientAuthorizationApplyConfiguration, opts metav1.ApplyOptions) (result *oauthv1.OAuthClientAuthorization, err error) + OAuthClientAuthorizationExpansion +} + +// oAuthClientAuthorizations implements OAuthClientAuthorizationInterface +type oAuthClientAuthorizations struct { + *gentype.ClientWithListAndApply[*oauthv1.OAuthClientAuthorization, *oauthv1.OAuthClientAuthorizationList, *applyconfigurationsoauthv1.OAuthClientAuthorizationApplyConfiguration] +} + +// newOAuthClientAuthorizations returns a OAuthClientAuthorizations +func newOAuthClientAuthorizations(c *OauthV1Client) *oAuthClientAuthorizations { + return &oAuthClientAuthorizations{ + gentype.NewClientWithListAndApply[*oauthv1.OAuthClientAuthorization, *oauthv1.OAuthClientAuthorizationList, *applyconfigurationsoauthv1.OAuthClientAuthorizationApplyConfiguration]( + "oauthclientauthorizations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *oauthv1.OAuthClientAuthorization { return &oauthv1.OAuthClientAuthorization{} }, + func() *oauthv1.OAuthClientAuthorizationList { return &oauthv1.OAuthClientAuthorizationList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/useroauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/useroauthaccesstoken.go new file mode 100644 index 0000000000000..6e632afb59643 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/useroauthaccesstoken.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + oauthv1 "github.com/openshift/api/oauth/v1" + applyconfigurationsoauthv1 "github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1" + scheme "github.com/openshift/client-go/oauth/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// UserOAuthAccessTokensGetter has a method to return a UserOAuthAccessTokenInterface. +// A group's client should implement this interface. 
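+//
+// Editor's sketch (not generator output): given a client c that implements
+// this interface and an assumed ctx, listing tokens looks like:
+//
+//	tokens, err := c.UserOAuthAccessTokens().List(ctx, metav1.ListOptions{})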
+type UserOAuthAccessTokensGetter interface { + UserOAuthAccessTokens() UserOAuthAccessTokenInterface +} + +// UserOAuthAccessTokenInterface has methods to work with UserOAuthAccessToken resources. +type UserOAuthAccessTokenInterface interface { + Create(ctx context.Context, userOAuthAccessToken *oauthv1.UserOAuthAccessToken, opts metav1.CreateOptions) (*oauthv1.UserOAuthAccessToken, error) + Update(ctx context.Context, userOAuthAccessToken *oauthv1.UserOAuthAccessToken, opts metav1.UpdateOptions) (*oauthv1.UserOAuthAccessToken, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*oauthv1.UserOAuthAccessToken, error) + List(ctx context.Context, opts metav1.ListOptions) (*oauthv1.UserOAuthAccessTokenList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *oauthv1.UserOAuthAccessToken, err error) + Apply(ctx context.Context, userOAuthAccessToken *applyconfigurationsoauthv1.UserOAuthAccessTokenApplyConfiguration, opts metav1.ApplyOptions) (result *oauthv1.UserOAuthAccessToken, err error) + UserOAuthAccessTokenExpansion +} + +// userOAuthAccessTokens implements UserOAuthAccessTokenInterface +type userOAuthAccessTokens struct { + *gentype.ClientWithListAndApply[*oauthv1.UserOAuthAccessToken, *oauthv1.UserOAuthAccessTokenList, *applyconfigurationsoauthv1.UserOAuthAccessTokenApplyConfiguration] +} + +// newUserOAuthAccessTokens returns a UserOAuthAccessTokens +func newUserOAuthAccessTokens(c *OauthV1Client) *userOAuthAccessTokens { + return &userOAuthAccessTokens{ + gentype.NewClientWithListAndApply[*oauthv1.UserOAuthAccessToken, *oauthv1.UserOAuthAccessTokenList, *applyconfigurationsoauthv1.UserOAuthAccessTokenApplyConfiguration]( + "useroauthaccesstokens", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *oauthv1.UserOAuthAccessToken { return &oauthv1.UserOAuthAccessToken{} }, + func() *oauthv1.UserOAuthAccessTokenList { return &oauthv1.UserOAuthAccessTokenList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/factory.go new file mode 100644 index 0000000000000..150619e56aecd --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + oauth "github.com/openshift/client-go/oauth/informers/externalversions/oauth" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
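+//
+// Editor's illustration (not generator output); the client, resync period and
+// label selector below are assumptions:
+//
+//	factory := NewSharedInformerFactoryWithOptions(client, 10*time.Minute,
+//		WithTweakListOptions(func(opts *v1.ListOptions) {
+//			opts.LabelSelector = "example.com/managed=true"
+//		}),
+//	)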
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+	factory := &sharedInformerFactory{
+		client:           client,
+		namespace:        v1.NamespaceAll,
+		defaultResync:    defaultResync,
+		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
+		startedInformers: make(map[reflect.Type]bool),
+		customResync:     make(map[reflect.Type]time.Duration),
+	}
+
+	// Apply all options
+	for _, opt := range options {
+		factory = opt(factory)
+	}
+
+	return factory
+}
+
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.shuttingDown {
+		return
+	}
+
+	for informerType, informer := range f.informers {
+		if !f.startedInformers[informerType] {
+			f.wg.Add(1)
+			// We need a new variable in each loop iteration,
+			// otherwise the goroutine would use the loop variable
+			// and that keeps changing.
+			informer := informer
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
+			f.startedInformers[informerType] = true
+		}
+	}
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+	f.lock.Lock()
+	f.shuttingDown = true
+	f.lock.Unlock()
+
+	// Will return immediately if there is nothing to wait for.
+	f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//		if !ok {
+//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//			return
+//		}
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+	Oauth() oauth.Interface
+}
+
+func (f *sharedInformerFactory) Oauth() oauth.Interface {
+	return oauth.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..f8a351c43029e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/generic.go
@@ -0,0 +1,54 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/oauth/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
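+//
+// Editor's sketch (not generator output): a generic informer is typically
+// obtained through the factory and consumed via its lister; labels here is
+// k8s.io/apimachinery/pkg/labels:
+//
+//	gi, err := factory.ForResource(v1.SchemeGroupVersion.WithResource("oauthclients"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	objs, err := gi.Lister().List(labels.Everything())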
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=oauth.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("oauthaccesstokens"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Oauth().V1().OAuthAccessTokens().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("oauthauthorizetokens"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Oauth().V1().OAuthAuthorizeTokens().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("oauthclients"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Oauth().V1().OAuthClients().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("oauthclientauthorizations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Oauth().V1().OAuthClientAuthorizations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("useroauthaccesstokens"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Oauth().V1().UserOAuthAccessTokens().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..968f52798481f --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/interface.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/interface.go new file mode 100644 index 0000000000000..8afb627c64914 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package oauth + +import ( + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1" +) + +// Interface provides access to each of this group's versions. 
+type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/interface.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/interface.go new file mode 100644 index 0000000000000..d32b5cdeff7de --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/interface.go @@ -0,0 +1,57 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // OAuthAccessTokens returns a OAuthAccessTokenInformer. + OAuthAccessTokens() OAuthAccessTokenInformer + // OAuthAuthorizeTokens returns a OAuthAuthorizeTokenInformer. + OAuthAuthorizeTokens() OAuthAuthorizeTokenInformer + // OAuthClients returns a OAuthClientInformer. + OAuthClients() OAuthClientInformer + // OAuthClientAuthorizations returns a OAuthClientAuthorizationInformer. + OAuthClientAuthorizations() OAuthClientAuthorizationInformer + // UserOAuthAccessTokens returns a UserOAuthAccessTokenInformer. + UserOAuthAccessTokens() UserOAuthAccessTokenInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// OAuthAccessTokens returns a OAuthAccessTokenInformer. +func (v *version) OAuthAccessTokens() OAuthAccessTokenInformer { + return &oAuthAccessTokenInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// OAuthAuthorizeTokens returns a OAuthAuthorizeTokenInformer. +func (v *version) OAuthAuthorizeTokens() OAuthAuthorizeTokenInformer { + return &oAuthAuthorizeTokenInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// OAuthClients returns a OAuthClientInformer. +func (v *version) OAuthClients() OAuthClientInformer { + return &oAuthClientInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// OAuthClientAuthorizations returns a OAuthClientAuthorizationInformer. +func (v *version) OAuthClientAuthorizations() OAuthClientAuthorizationInformer { + return &oAuthClientAuthorizationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// UserOAuthAccessTokens returns a UserOAuthAccessTokenInformer. 
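+//
+// Editor's sketch (not generator output), assuming a factory built with
+// NewSharedInformerFactory:
+//
+//	informer := factory.Oauth().V1().UserOAuthAccessTokens().Informer()
+//	lister := factory.Oauth().V1().UserOAuthAccessTokens().Lister()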
+func (v *version) UserOAuthAccessTokens() UserOAuthAccessTokenInformer { + return &userOAuthAccessTokenInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthaccesstoken.go new file mode 100644 index 0000000000000..d2bfda827cf9d --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthaccesstoken.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apioauthv1 "github.com/openshift/api/oauth/v1" + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + oauthv1 "github.com/openshift/client-go/oauth/listers/oauth/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthAccessTokenInformer provides access to a shared informer and lister for +// OAuthAccessTokens. +type OAuthAccessTokenInformer interface { + Informer() cache.SharedIndexInformer + Lister() oauthv1.OAuthAccessTokenLister +} + +type oAuthAccessTokenInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOAuthAccessTokenInformer constructs a new informer for OAuthAccessToken type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOAuthAccessTokenInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOAuthAccessTokenInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOAuthAccessTokenInformer constructs a new informer for OAuthAccessToken type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
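+//
+// Editor's sketch (not generator output): standalone construction with an
+// assumed client, a 10-minute resync and no tweak function:
+//
+//	inf := NewFilteredOAuthAccessTokenInformer(client, 10*time.Minute, cache.Indexers{}, nil)
+//	go inf.Run(ctx.Done())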
+func NewFilteredOAuthAccessTokenInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthAccessTokens().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthAccessTokens().Watch(context.TODO(), options) + }, + }, + &apioauthv1.OAuthAccessToken{}, + resyncPeriod, + indexers, + ) +} + +func (f *oAuthAccessTokenInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOAuthAccessTokenInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oAuthAccessTokenInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apioauthv1.OAuthAccessToken{}, f.defaultInformer) +} + +func (f *oAuthAccessTokenInformer) Lister() oauthv1.OAuthAccessTokenLister { + return oauthv1.NewOAuthAccessTokenLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthauthorizetoken.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthauthorizetoken.go new file mode 100644 index 0000000000000..f555a541e7dc4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthauthorizetoken.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apioauthv1 "github.com/openshift/api/oauth/v1" + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + oauthv1 "github.com/openshift/client-go/oauth/listers/oauth/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthAuthorizeTokenInformer provides access to a shared informer and lister for +// OAuthAuthorizeTokens. +type OAuthAuthorizeTokenInformer interface { + Informer() cache.SharedIndexInformer + Lister() oauthv1.OAuthAuthorizeTokenLister +} + +type oAuthAuthorizeTokenInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOAuthAuthorizeTokenInformer constructs a new informer for OAuthAuthorizeToken type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOAuthAuthorizeTokenInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOAuthAuthorizeTokenInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOAuthAuthorizeTokenInformer constructs a new informer for OAuthAuthorizeToken type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredOAuthAuthorizeTokenInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthAuthorizeTokens().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthAuthorizeTokens().Watch(context.TODO(), options) + }, + }, + &apioauthv1.OAuthAuthorizeToken{}, + resyncPeriod, + indexers, + ) +} + +func (f *oAuthAuthorizeTokenInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOAuthAuthorizeTokenInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oAuthAuthorizeTokenInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apioauthv1.OAuthAuthorizeToken{}, f.defaultInformer) +} + +func (f *oAuthAuthorizeTokenInformer) Lister() oauthv1.OAuthAuthorizeTokenLister { + return oauthv1.NewOAuthAuthorizeTokenLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthclient.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthclient.go new file mode 100644 index 0000000000000..488fa28b90ad9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthclient.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apioauthv1 "github.com/openshift/api/oauth/v1" + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + oauthv1 "github.com/openshift/client-go/oauth/listers/oauth/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthClientInformer provides access to a shared informer and lister for +// OAuthClients. +type OAuthClientInformer interface { + Informer() cache.SharedIndexInformer + Lister() oauthv1.OAuthClientLister +} + +type oAuthClientInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOAuthClientInformer constructs a new informer for OAuthClient type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOAuthClientInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOAuthClientInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOAuthClientInformer constructs a new informer for OAuthClient type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredOAuthClientInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthClients().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthClients().Watch(context.TODO(), options) + }, + }, + &apioauthv1.OAuthClient{}, + resyncPeriod, + indexers, + ) +} + +func (f *oAuthClientInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOAuthClientInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oAuthClientInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apioauthv1.OAuthClient{}, f.defaultInformer) +} + +func (f *oAuthClientInformer) Lister() oauthv1.OAuthClientLister { + return oauthv1.NewOAuthClientLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthclientauthorization.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthclientauthorization.go new file mode 100644 index 0000000000000..810b909f6940b --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/oauthclientauthorization.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apioauthv1 "github.com/openshift/api/oauth/v1" + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + oauthv1 "github.com/openshift/client-go/oauth/listers/oauth/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthClientAuthorizationInformer provides access to a shared informer and lister for +// OAuthClientAuthorizations. +type OAuthClientAuthorizationInformer interface { + Informer() cache.SharedIndexInformer + Lister() oauthv1.OAuthClientAuthorizationLister +} + +type oAuthClientAuthorizationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOAuthClientAuthorizationInformer constructs a new informer for OAuthClientAuthorization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOAuthClientAuthorizationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOAuthClientAuthorizationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOAuthClientAuthorizationInformer constructs a new informer for OAuthClientAuthorization type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredOAuthClientAuthorizationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthClientAuthorizations().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().OAuthClientAuthorizations().Watch(context.TODO(), options) + }, + }, + &apioauthv1.OAuthClientAuthorization{}, + resyncPeriod, + indexers, + ) +} + +func (f *oAuthClientAuthorizationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOAuthClientAuthorizationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oAuthClientAuthorizationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apioauthv1.OAuthClientAuthorization{}, f.defaultInformer) +} + +func (f *oAuthClientAuthorizationInformer) Lister() oauthv1.OAuthClientAuthorizationLister { + return oauthv1.NewOAuthClientAuthorizationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/useroauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/useroauthaccesstoken.go new file mode 100644 index 0000000000000..3190f199a1305 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1/useroauthaccesstoken.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apioauthv1 "github.com/openshift/api/oauth/v1" + versioned "github.com/openshift/client-go/oauth/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces" + oauthv1 "github.com/openshift/client-go/oauth/listers/oauth/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// UserOAuthAccessTokenInformer provides access to a shared informer and lister for +// UserOAuthAccessTokens. +type UserOAuthAccessTokenInformer interface { + Informer() cache.SharedIndexInformer + Lister() oauthv1.UserOAuthAccessTokenLister +} + +type userOAuthAccessTokenInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewUserOAuthAccessTokenInformer constructs a new informer for UserOAuthAccessToken type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
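+// Where a standalone informer is unavoidable, a hedged sketch using the
+// constructor declared just below (the 30-minute resync and empty indexers
+// are illustrative choices, not requirements):
+//
+//	inf := NewUserOAuthAccessTokenInformer(client, 30*time.Minute, cache.Indexers{})
+//	go inf.Run(stopCh)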
+func NewUserOAuthAccessTokenInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredUserOAuthAccessTokenInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredUserOAuthAccessTokenInformer constructs a new informer for UserOAuthAccessToken type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredUserOAuthAccessTokenInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().UserOAuthAccessTokens().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OauthV1().UserOAuthAccessTokens().Watch(context.TODO(), options) + }, + }, + &apioauthv1.UserOAuthAccessToken{}, + resyncPeriod, + indexers, + ) +} + +func (f *userOAuthAccessTokenInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredUserOAuthAccessTokenInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *userOAuthAccessTokenInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apioauthv1.UserOAuthAccessToken{}, f.defaultInformer) +} + +func (f *userOAuthAccessTokenInformer) Lister() oauthv1.UserOAuthAccessTokenLister { + return oauthv1.NewUserOAuthAccessTokenLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/expansion_generated.go new file mode 100644 index 0000000000000..1d0d95c0e0a00 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/expansion_generated.go @@ -0,0 +1,23 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// OAuthAccessTokenListerExpansion allows custom methods to be added to +// OAuthAccessTokenLister. +type OAuthAccessTokenListerExpansion interface{} + +// OAuthAuthorizeTokenListerExpansion allows custom methods to be added to +// OAuthAuthorizeTokenLister. +type OAuthAuthorizeTokenListerExpansion interface{} + +// OAuthClientListerExpansion allows custom methods to be added to +// OAuthClientLister. +type OAuthClientListerExpansion interface{} + +// OAuthClientAuthorizationListerExpansion allows custom methods to be added to +// OAuthClientAuthorizationLister. +type OAuthClientAuthorizationListerExpansion interface{} + +// UserOAuthAccessTokenListerExpansion allows custom methods to be added to +// UserOAuthAccessTokenLister. +type UserOAuthAccessTokenListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthaccesstoken.go new file mode 100644 index 0000000000000..b829fd68fe51f --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthaccesstoken.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. 
DO NOT EDIT. + +package v1 + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthAccessTokenLister helps list OAuthAccessTokens. +// All objects returned here must be treated as read-only. +type OAuthAccessTokenLister interface { + // List lists all OAuthAccessTokens in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*oauthv1.OAuthAccessToken, err error) + // Get retrieves the OAuthAccessToken from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*oauthv1.OAuthAccessToken, error) + OAuthAccessTokenListerExpansion +} + +// oAuthAccessTokenLister implements the OAuthAccessTokenLister interface. +type oAuthAccessTokenLister struct { + listers.ResourceIndexer[*oauthv1.OAuthAccessToken] +} + +// NewOAuthAccessTokenLister returns a new OAuthAccessTokenLister. +func NewOAuthAccessTokenLister(indexer cache.Indexer) OAuthAccessTokenLister { + return &oAuthAccessTokenLister{listers.New[*oauthv1.OAuthAccessToken](indexer, oauthv1.Resource("oauthaccesstoken"))} +} diff --git a/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthauthorizetoken.go b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthauthorizetoken.go new file mode 100644 index 0000000000000..157a7a3b37cdb --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthauthorizetoken.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthAuthorizeTokenLister helps list OAuthAuthorizeTokens. +// All objects returned here must be treated as read-only. +type OAuthAuthorizeTokenLister interface { + // List lists all OAuthAuthorizeTokens in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*oauthv1.OAuthAuthorizeToken, err error) + // Get retrieves the OAuthAuthorizeToken from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*oauthv1.OAuthAuthorizeToken, error) + OAuthAuthorizeTokenListerExpansion +} + +// oAuthAuthorizeTokenLister implements the OAuthAuthorizeTokenLister interface. +type oAuthAuthorizeTokenLister struct { + listers.ResourceIndexer[*oauthv1.OAuthAuthorizeToken] +} + +// NewOAuthAuthorizeTokenLister returns a new OAuthAuthorizeTokenLister. +func NewOAuthAuthorizeTokenLister(indexer cache.Indexer) OAuthAuthorizeTokenLister { + return &oAuthAuthorizeTokenLister{listers.New[*oauthv1.OAuthAuthorizeToken](indexer, oauthv1.Resource("oauthauthorizetoken"))} +} diff --git a/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthclient.go b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthclient.go new file mode 100644 index 0000000000000..7d1e1d0b65563 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthclient.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthClientLister helps list OAuthClients. 
+// All objects returned here must be treated as read-only. +type OAuthClientLister interface { + // List lists all OAuthClients in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*oauthv1.OAuthClient, err error) + // Get retrieves the OAuthClient from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*oauthv1.OAuthClient, error) + OAuthClientListerExpansion +} + +// oAuthClientLister implements the OAuthClientLister interface. +type oAuthClientLister struct { + listers.ResourceIndexer[*oauthv1.OAuthClient] +} + +// NewOAuthClientLister returns a new OAuthClientLister. +func NewOAuthClientLister(indexer cache.Indexer) OAuthClientLister { + return &oAuthClientLister{listers.New[*oauthv1.OAuthClient](indexer, oauthv1.Resource("oauthclient"))} +} diff --git a/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthclientauthorization.go b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthclientauthorization.go new file mode 100644 index 0000000000000..1237edb9e6925 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/oauthclientauthorization.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthClientAuthorizationLister helps list OAuthClientAuthorizations. +// All objects returned here must be treated as read-only. +type OAuthClientAuthorizationLister interface { + // List lists all OAuthClientAuthorizations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*oauthv1.OAuthClientAuthorization, err error) + // Get retrieves the OAuthClientAuthorization from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*oauthv1.OAuthClientAuthorization, error) + OAuthClientAuthorizationListerExpansion +} + +// oAuthClientAuthorizationLister implements the OAuthClientAuthorizationLister interface. +type oAuthClientAuthorizationLister struct { + listers.ResourceIndexer[*oauthv1.OAuthClientAuthorization] +} + +// NewOAuthClientAuthorizationLister returns a new OAuthClientAuthorizationLister. +func NewOAuthClientAuthorizationLister(indexer cache.Indexer) OAuthClientAuthorizationLister { + return &oAuthClientAuthorizationLister{listers.New[*oauthv1.OAuthClientAuthorization](indexer, oauthv1.Resource("oauthclientauthorization"))} +} diff --git a/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/useroauthaccesstoken.go b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/useroauthaccesstoken.go new file mode 100644 index 0000000000000..f45ab65581452 --- /dev/null +++ b/vendor/github.com/openshift/client-go/oauth/listers/oauth/v1/useroauthaccesstoken.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// UserOAuthAccessTokenLister helps list UserOAuthAccessTokens. +// All objects returned here must be treated as read-only. +type UserOAuthAccessTokenLister interface { + // List lists all UserOAuthAccessTokens in the indexer. + // Objects returned here must be treated as read-only. 
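+ // A hedged call sketch: labels.Everything(), from the imported labels
+ // package, selects all objects, e.g.
+ //
+ // tokens, err := lister.List(labels.Everything())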
+ List(selector labels.Selector) (ret []*oauthv1.UserOAuthAccessToken, err error) + // Get retrieves the UserOAuthAccessToken from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*oauthv1.UserOAuthAccessToken, error) + UserOAuthAccessTokenListerExpansion +} + +// userOAuthAccessTokenLister implements the UserOAuthAccessTokenLister interface. +type userOAuthAccessTokenLister struct { + listers.ResourceIndexer[*oauthv1.UserOAuthAccessToken] +} + +// NewUserOAuthAccessTokenLister returns a new UserOAuthAccessTokenLister. +func NewUserOAuthAccessTokenLister(indexer cache.Indexer) UserOAuthAccessTokenLister { + return &userOAuthAccessTokenLister{listers.New[*oauthv1.UserOAuthAccessToken](indexer, oauthv1.Resource("useroauthaccesstoken"))} +} diff --git a/vendor/github.com/openshift/client-go/quota/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/quota/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..003565e7a3da8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/applyconfigurations/internal/internal.go @@ -0,0 +1,331 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.quota.v1.ClusterResourceQuota + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.quota.v1.ClusterResourceQuotaSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.quota.v1.ClusterResourceQuotaStatus + default: {} +- name: com.github.openshift.api.quota.v1.ClusterResourceQuotaSelector + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: labels + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: com.github.openshift.api.quota.v1.ClusterResourceQuotaSpec + map: + fields: + - name: quota + type: + namedType: io.k8s.api.core.v1.ResourceQuotaSpec + default: {} + - name: selector + type: + namedType: com.github.openshift.api.quota.v1.ClusterResourceQuotaSelector + default: {} +- name: com.github.openshift.api.quota.v1.ClusterResourceQuotaStatus + map: + fields: + - name: namespaces + type: + list: + elementType: + namedType: com.github.openshift.api.quota.v1.ResourceQuotaStatusByNamespace + elementRelationship: atomic + - name: total + type: + namedType: io.k8s.api.core.v1.ResourceQuotaStatus + default: {} +- name: com.github.openshift.api.quota.v1.ResourceQuotaStatusByNamespace + map: + fields: + - name: namespace + type: + scalar: string + default: "" + - name: status + type: + namedType: io.k8s.api.core.v1.ResourceQuotaStatus + default: {} +- name: io.k8s.api.core.v1.ResourceQuotaSpec + map: + fields: + - name: hard + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: scopeSelector + type: + namedType: io.k8s.api.core.v1.ScopeSelector + - name: scopes + type: + list: + 
elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceQuotaStatus + map: + fields: + - name: hard + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: used + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.ScopeSelector + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ScopedResourceSelectorRequirement + elementRelationship: atomic + elementRelationship: atomic +- name: io.k8s.api.core.v1.ScopedResourceSelectorRequirement + map: + fields: + - name: operator + type: + scalar: string + default: "" + - name: scopeName + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.api.resource.Quantity + scalar: untyped +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + elementRelationship: atomic + - name: matchLabels + type: + map: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: 
io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequota.go b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequota.go new file mode 100644 index 0000000000000..c279b2788757c --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequota.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + quotav1 "github.com/openshift/api/quota/v1" + internal "github.com/openshift/client-go/quota/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterResourceQuotaApplyConfiguration represents a declarative configuration of the ClusterResourceQuota type for use +// with apply. +type ClusterResourceQuotaApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` +} + +// ClusterResourceQuota constructs a declarative configuration of the ClusterResourceQuota type for use with +// apply. +func ClusterResourceQuota(name string) *ClusterResourceQuotaApplyConfiguration { + b := &ClusterResourceQuotaApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterResourceQuota") + b.WithAPIVersion("quota.openshift.io/v1") + return b +} + +// ExtractClusterResourceQuota extracts the applied configuration owned by fieldManager from +// clusterResourceQuota. If no managedFields are found in clusterResourceQuota for fieldManager, a +// ClusterResourceQuotaApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// clusterResourceQuota must be an unmodified ClusterResourceQuota API object that was retrieved from the Kubernetes API. +// ExtractClusterResourceQuota provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterResourceQuota(clusterResourceQuota *quotav1.ClusterResourceQuota, fieldManager string) (*ClusterResourceQuotaApplyConfiguration, error) { + return extractClusterResourceQuota(clusterResourceQuota, fieldManager, "") +} + +// ExtractClusterResourceQuotaStatus is the same as ExtractClusterResourceQuota except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterResourceQuotaStatus(clusterResourceQuota *quotav1.ClusterResourceQuota, fieldManager string) (*ClusterResourceQuotaApplyConfiguration, error) { + return extractClusterResourceQuota(clusterResourceQuota, fieldManager, "status") +} + +func extractClusterResourceQuota(clusterResourceQuota *quotav1.ClusterResourceQuota, fieldManager string, subresource string) (*ClusterResourceQuotaApplyConfiguration, error) { + b := &ClusterResourceQuotaApplyConfiguration{} + err := managedfields.ExtractInto(clusterResourceQuota, internal.Parser().Type("com.github.openshift.api.quota.v1.ClusterResourceQuota"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterResourceQuota.Name) + + b.WithKind("ClusterResourceQuota") + b.WithAPIVersion("quota.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithKind(value string) *ClusterResourceQuotaApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithAPIVersion(value string) *ClusterResourceQuotaApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithName(value string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *ClusterResourceQuotaApplyConfiguration) WithGenerateName(value string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithNamespace(value string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithUID(value types.UID) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithResourceVersion(value string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithGeneration(value int64) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ClusterResourceQuotaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *ClusterResourceQuotaApplyConfiguration) WithLabels(entries map[string]string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *ClusterResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterResourceQuotaApplyConfiguration) WithFinalizers(values ...string) *ClusterResourceQuotaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterResourceQuotaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithSpec(value *ClusterResourceQuotaSpecApplyConfiguration) *ClusterResourceQuotaApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterResourceQuotaApplyConfiguration) WithStatus(value *ClusterResourceQuotaStatusApplyConfiguration) *ClusterResourceQuotaApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterResourceQuotaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotaselector.go b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotaselector.go new file mode 100644 index 0000000000000..a759a1b9bc0f4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotaselector.go @@ -0,0 +1,42 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterResourceQuotaSelectorApplyConfiguration represents a declarative configuration of the ClusterResourceQuotaSelector type for use +// with apply. +type ClusterResourceQuotaSelectorApplyConfiguration struct { + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labels,omitempty"` + AnnotationSelector map[string]string `json:"annotations,omitempty"` +} + +// ClusterResourceQuotaSelectorApplyConfiguration constructs a declarative configuration of the ClusterResourceQuotaSelector type for use with +// apply. +func ClusterResourceQuotaSelector() *ClusterResourceQuotaSelectorApplyConfiguration { + return &ClusterResourceQuotaSelectorApplyConfiguration{} +} + +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. 
+func (b *ClusterResourceQuotaSelectorApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *ClusterResourceQuotaSelectorApplyConfiguration { + b.LabelSelector = value + return b +} + +// WithAnnotationSelector puts the entries into the AnnotationSelector field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the AnnotationSelector field, +// overwriting existing map entries in the AnnotationSelector field with the same key. +func (b *ClusterResourceQuotaSelectorApplyConfiguration) WithAnnotationSelector(entries map[string]string) *ClusterResourceQuotaSelectorApplyConfiguration { + if b.AnnotationSelector == nil && len(entries) > 0 { + b.AnnotationSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.AnnotationSelector[k] = v + } + return b +} diff --git a/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotaspec.go b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotaspec.go new file mode 100644 index 0000000000000..5f64a692e97f5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotaspec.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ClusterResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ClusterResourceQuotaSpec type for use +// with apply. +type ClusterResourceQuotaSpecApplyConfiguration struct { + Selector *ClusterResourceQuotaSelectorApplyConfiguration `json:"selector,omitempty"` + Quota *corev1.ResourceQuotaSpec `json:"quota,omitempty"` +} + +// ClusterResourceQuotaSpecApplyConfiguration constructs a declarative configuration of the ClusterResourceQuotaSpec type for use with +// apply. +func ClusterResourceQuotaSpec() *ClusterResourceQuotaSpecApplyConfiguration { + return &ClusterResourceQuotaSpecApplyConfiguration{} +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *ClusterResourceQuotaSpecApplyConfiguration) WithSelector(value *ClusterResourceQuotaSelectorApplyConfiguration) *ClusterResourceQuotaSpecApplyConfiguration { + b.Selector = value + return b +} + +// WithQuota sets the Quota field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Quota field is set to the value of the last call. +func (b *ClusterResourceQuotaSpecApplyConfiguration) WithQuota(value corev1.ResourceQuotaSpec) *ClusterResourceQuotaSpecApplyConfiguration { + b.Quota = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotastatus.go b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotastatus.go new file mode 100644 index 0000000000000..bd5dbca4d5944 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/clusterresourcequotastatus.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen.
DO NOT EDIT. + +package v1 + +import ( + quotav1 "github.com/openshift/api/quota/v1" + corev1 "k8s.io/api/core/v1" +) + +// ClusterResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ClusterResourceQuotaStatus type for use +// with apply. +type ClusterResourceQuotaStatusApplyConfiguration struct { + Total *corev1.ResourceQuotaStatus `json:"total,omitempty"` + Namespaces *quotav1.ResourceQuotasStatusByNamespace `json:"namespaces,omitempty"` +} + +// ClusterResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ClusterResourceQuotaStatus type for use with +// apply. +func ClusterResourceQuotaStatus() *ClusterResourceQuotaStatusApplyConfiguration { + return &ClusterResourceQuotaStatusApplyConfiguration{} +} + +// WithTotal sets the Total field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Total field is set to the value of the last call. +func (b *ClusterResourceQuotaStatusApplyConfiguration) WithTotal(value corev1.ResourceQuotaStatus) *ClusterResourceQuotaStatusApplyConfiguration { + b.Total = &value + return b +} + +// WithNamespaces sets the Namespaces field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespaces field is set to the value of the last call. +func (b *ClusterResourceQuotaStatusApplyConfiguration) WithNamespaces(value quotav1.ResourceQuotasStatusByNamespace) *ClusterResourceQuotaStatusApplyConfiguration { + b.Namespaces = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/resourcequotastatusbynamespace.go b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/resourcequotastatusbynamespace.go new file mode 100644 index 0000000000000..973ba124f256d --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/applyconfigurations/quota/v1/resourcequotastatusbynamespace.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ResourceQuotaStatusByNamespaceApplyConfiguration represents a declarative configuration of the ResourceQuotaStatusByNamespace type for use +// with apply. +type ResourceQuotaStatusByNamespaceApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Status *corev1.ResourceQuotaStatus `json:"status,omitempty"` +} + +// ResourceQuotaStatusByNamespaceApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatusByNamespace type for use with +// apply. +func ResourceQuotaStatusByNamespace() *ResourceQuotaStatusByNamespaceApplyConfiguration { + return &ResourceQuotaStatusByNamespaceApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ResourceQuotaStatusByNamespaceApplyConfiguration) WithNamespace(value string) *ResourceQuotaStatusByNamespaceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ResourceQuotaStatusByNamespaceApplyConfiguration) WithStatus(value corev1.ResourceQuotaStatus) *ResourceQuotaStatusByNamespaceApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..e3a6e4f1387e0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + quotav1 "github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + QuotaV1() quotav1.QuotaV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + quotaV1 *quotav1.QuotaV1Client +} + +// QuotaV1 retrieves the QuotaV1Client +func (c *Clientset) QuotaV1() quotav1.QuotaV1Interface { + return c.quotaV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
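+// A hedged usage sketch (rest.HTTPClientFor is the helper named above; cfg
+// is an assumed *rest.Config, and error handling is elided):
+//
+//	httpClient, _ := rest.HTTPClientFor(cfg)
+//	cs, _ := versioned.NewForConfigAndClient(cfg, httpClient)
+//	crqs, _ := cs.QuotaV1().ClusterResourceQuotas().List(ctx, metav1.ListOptions{})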
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.quotaV1, err = quotav1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.quotaV1 = quotav1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..381033d1822d7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + quotav1 "github.com/openshift/api/quota/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + quotav1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/appliedclusterresourcequota.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/appliedclusterresourcequota.go new file mode 100644 index 0000000000000..61015404ddf0c --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/appliedclusterresourcequota.go @@ -0,0 +1,44 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + quotav1 "github.com/openshift/api/quota/v1" + scheme "github.com/openshift/client-go/quota/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// AppliedClusterResourceQuotasGetter has a method to return an AppliedClusterResourceQuotaInterface. +// A group's client should implement this interface. +type AppliedClusterResourceQuotasGetter interface { + AppliedClusterResourceQuotas(namespace string) AppliedClusterResourceQuotaInterface +} + +// AppliedClusterResourceQuotaInterface has methods to work with AppliedClusterResourceQuota resources. +type AppliedClusterResourceQuotaInterface interface { + Get(ctx context.Context, name string, opts metav1.GetOptions) (*quotav1.AppliedClusterResourceQuota, error) + List(ctx context.Context, opts metav1.ListOptions) (*quotav1.AppliedClusterResourceQuotaList, error) + AppliedClusterResourceQuotaExpansion +} + +// appliedClusterResourceQuotas implements AppliedClusterResourceQuotaInterface +type appliedClusterResourceQuotas struct { + *gentype.ClientWithList[*quotav1.AppliedClusterResourceQuota, *quotav1.AppliedClusterResourceQuotaList] +} + +// newAppliedClusterResourceQuotas returns an AppliedClusterResourceQuotas +func newAppliedClusterResourceQuotas(c *QuotaV1Client, namespace string) *appliedClusterResourceQuotas { + return &appliedClusterResourceQuotas{ + gentype.NewClientWithList[*quotav1.AppliedClusterResourceQuota, *quotav1.AppliedClusterResourceQuotaList]( + "appliedclusterresourcequotas", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *quotav1.AppliedClusterResourceQuota { return &quotav1.AppliedClusterResourceQuota{} }, + func() *quotav1.AppliedClusterResourceQuotaList { return &quotav1.AppliedClusterResourceQuotaList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/clusterresourcequota.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/clusterresourcequota.go new file mode 100644 index 0000000000000..20f25acfdfaa0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/clusterresourcequota.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + quotav1 "github.com/openshift/api/quota/v1" + applyconfigurationsquotav1 "github.com/openshift/client-go/quota/applyconfigurations/quota/v1" + scheme "github.com/openshift/client-go/quota/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterResourceQuotasGetter has a method to return a ClusterResourceQuotaInterface. +// A group's client should implement this interface.
+type ClusterResourceQuotasGetter interface { + ClusterResourceQuotas() ClusterResourceQuotaInterface +} + +// ClusterResourceQuotaInterface has methods to work with ClusterResourceQuota resources. +type ClusterResourceQuotaInterface interface { + Create(ctx context.Context, clusterResourceQuota *quotav1.ClusterResourceQuota, opts metav1.CreateOptions) (*quotav1.ClusterResourceQuota, error) + Update(ctx context.Context, clusterResourceQuota *quotav1.ClusterResourceQuota, opts metav1.UpdateOptions) (*quotav1.ClusterResourceQuota, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, clusterResourceQuota *quotav1.ClusterResourceQuota, opts metav1.UpdateOptions) (*quotav1.ClusterResourceQuota, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*quotav1.ClusterResourceQuota, error) + List(ctx context.Context, opts metav1.ListOptions) (*quotav1.ClusterResourceQuotaList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *quotav1.ClusterResourceQuota, err error) + Apply(ctx context.Context, clusterResourceQuota *applyconfigurationsquotav1.ClusterResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *quotav1.ClusterResourceQuota, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterResourceQuota *applyconfigurationsquotav1.ClusterResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *quotav1.ClusterResourceQuota, err error) + ClusterResourceQuotaExpansion +} + +// clusterResourceQuotas implements ClusterResourceQuotaInterface +type clusterResourceQuotas struct { + *gentype.ClientWithListAndApply[*quotav1.ClusterResourceQuota, *quotav1.ClusterResourceQuotaList, *applyconfigurationsquotav1.ClusterResourceQuotaApplyConfiguration] +} + +// newClusterResourceQuotas returns a ClusterResourceQuotas +func newClusterResourceQuotas(c *QuotaV1Client) *clusterResourceQuotas { + return &clusterResourceQuotas{ + gentype.NewClientWithListAndApply[*quotav1.ClusterResourceQuota, *quotav1.ClusterResourceQuotaList, *applyconfigurationsquotav1.ClusterResourceQuotaApplyConfiguration]( + "clusterresourcequotas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *quotav1.ClusterResourceQuota { return &quotav1.ClusterResourceQuota{} }, + func() *quotav1.ClusterResourceQuotaList { return &quotav1.ClusterResourceQuotaList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/doc.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients.
+package v1 diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/generated_expansion.go new file mode 100644 index 0000000000000..c79f16776b035 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/generated_expansion.go @@ -0,0 +1,7 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type AppliedClusterResourceQuotaExpansion interface{} + +type ClusterResourceQuotaExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/quota_client.go b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/quota_client.go new file mode 100644 index 0000000000000..3ff8154774557 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1/quota_client.go @@ -0,0 +1,96 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + quotav1 "github.com/openshift/api/quota/v1" + scheme "github.com/openshift/client-go/quota/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type QuotaV1Interface interface { + RESTClient() rest.Interface + AppliedClusterResourceQuotasGetter + ClusterResourceQuotasGetter +} + +// QuotaV1Client is used to interact with features provided by the quota.openshift.io group. +type QuotaV1Client struct { + restClient rest.Interface +} + +func (c *QuotaV1Client) AppliedClusterResourceQuotas(namespace string) AppliedClusterResourceQuotaInterface { + return newAppliedClusterResourceQuotas(c, namespace) +} + +func (c *QuotaV1Client) ClusterResourceQuotas() ClusterResourceQuotaInterface { + return newClusterResourceQuotas(c) +} + +// NewForConfig creates a new QuotaV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*QuotaV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new QuotaV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*QuotaV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &QuotaV1Client{client}, nil +} + +// NewForConfigOrDie creates a new QuotaV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *QuotaV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new QuotaV1Client for the given RESTClient. 
+func New(c rest.Interface) *QuotaV1Client { + return &QuotaV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := quotav1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *QuotaV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/quota/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/quota/informers/externalversions/factory.go new file mode 100644 index 0000000000000..4e04dffba1878 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/quota/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces" + quota "github.com/openshift/client-go/quota/informers/externalversions/quota" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. 
+func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.Shutdown() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Quota() quota.Interface +} + +func (f *sharedInformerFactory) Quota() quota.Interface { + return quota.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/quota/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/quota/informers/externalversions/generic.go new file mode 100644 index 0000000000000..54a2e3887c1d8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/informers/externalversions/generic.go @@ -0,0 +1,46 @@ +// Code generated by informer-gen. DO NOT EDIT.
+ +package externalversions + +import ( + fmt "fmt" + + v1 "github.com/openshift/api/quota/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=quota.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("clusterresourcequotas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Quota().V1().ClusterResourceQuotas().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..3e3f07fc30976 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/quota/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/interface.go b/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/interface.go new file mode 100644 index 0000000000000..5df3c875230ff --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package quota + +import ( + internalinterfaces "github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/v1/clusterresourcequota.go b/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/v1/clusterresourcequota.go new file mode 100644 index 0000000000000..df790ff94ac87 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/v1/clusterresourcequota.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiquotav1 "github.com/openshift/api/quota/v1" + versioned "github.com/openshift/client-go/quota/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces" + quotav1 "github.com/openshift/client-go/quota/listers/quota/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterResourceQuotaInformer provides access to a shared informer and lister for +// ClusterResourceQuotas. +type ClusterResourceQuotaInformer interface { + Informer() cache.SharedIndexInformer + Lister() quotav1.ClusterResourceQuotaLister +} + +type clusterResourceQuotaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterResourceQuotaInformer constructs a new informer for ClusterResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterResourceQuotaInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterResourceQuotaInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterResourceQuotaInformer constructs a new informer for ClusterResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterResourceQuotaInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.QuotaV1().ClusterResourceQuotas().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.QuotaV1().ClusterResourceQuotas().Watch(context.TODO(), options) + }, + }, + &apiquotav1.ClusterResourceQuota{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterResourceQuotaInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterResourceQuotaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterResourceQuotaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiquotav1.ClusterResourceQuota{}, f.defaultInformer) +} + +func (f *clusterResourceQuotaInformer) Lister() quotav1.ClusterResourceQuotaLister { + return quotav1.NewClusterResourceQuotaLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/v1/interface.go b/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/v1/interface.go new file mode 100644 index 0000000000000..600eccd5109f4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/informers/externalversions/quota/v1/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterResourceQuotas returns a ClusterResourceQuotaInformer. + ClusterResourceQuotas() ClusterResourceQuotaInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterResourceQuotas returns a ClusterResourceQuotaInformer. +func (v *version) ClusterResourceQuotas() ClusterResourceQuotaInformer { + return &clusterResourceQuotaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/quota/listers/quota/v1/appliedclusterresourcequota.go b/vendor/github.com/openshift/client-go/quota/listers/quota/v1/appliedclusterresourcequota.go new file mode 100644 index 0000000000000..6a393c0669196 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/listers/quota/v1/appliedclusterresourcequota.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + quotav1 "github.com/openshift/api/quota/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// AppliedClusterResourceQuotaLister helps list AppliedClusterResourceQuotas. +// All objects returned here must be treated as read-only. +type AppliedClusterResourceQuotaLister interface { + // List lists all AppliedClusterResourceQuotas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*quotav1.AppliedClusterResourceQuota, err error) + // AppliedClusterResourceQuotas returns an object that can list and get AppliedClusterResourceQuotas. + AppliedClusterResourceQuotas(namespace string) AppliedClusterResourceQuotaNamespaceLister + AppliedClusterResourceQuotaListerExpansion +} + +// appliedClusterResourceQuotaLister implements the AppliedClusterResourceQuotaLister interface. +type appliedClusterResourceQuotaLister struct { + listers.ResourceIndexer[*quotav1.AppliedClusterResourceQuota] +} + +// NewAppliedClusterResourceQuotaLister returns a new AppliedClusterResourceQuotaLister. +func NewAppliedClusterResourceQuotaLister(indexer cache.Indexer) AppliedClusterResourceQuotaLister { + return &appliedClusterResourceQuotaLister{listers.New[*quotav1.AppliedClusterResourceQuota](indexer, quotav1.Resource("appliedclusterresourcequota"))} +} + +// AppliedClusterResourceQuotas returns an object that can list and get AppliedClusterResourceQuotas. +func (s *appliedClusterResourceQuotaLister) AppliedClusterResourceQuotas(namespace string) AppliedClusterResourceQuotaNamespaceLister { + return appliedClusterResourceQuotaNamespaceLister{listers.NewNamespaced[*quotav1.AppliedClusterResourceQuota](s.ResourceIndexer, namespace)} +} + +// AppliedClusterResourceQuotaNamespaceLister helps list and get AppliedClusterResourceQuotas. +// All objects returned here must be treated as read-only. +type AppliedClusterResourceQuotaNamespaceLister interface { + // List lists all AppliedClusterResourceQuotas in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*quotav1.AppliedClusterResourceQuota, err error) + // Get retrieves the AppliedClusterResourceQuota from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*quotav1.AppliedClusterResourceQuota, error) + AppliedClusterResourceQuotaNamespaceListerExpansion +} + +// appliedClusterResourceQuotaNamespaceLister implements the AppliedClusterResourceQuotaNamespaceLister +// interface. +type appliedClusterResourceQuotaNamespaceLister struct { + listers.ResourceIndexer[*quotav1.AppliedClusterResourceQuota] +} diff --git a/vendor/github.com/openshift/client-go/quota/listers/quota/v1/clusterresourcequota.go b/vendor/github.com/openshift/client-go/quota/listers/quota/v1/clusterresourcequota.go new file mode 100644 index 0000000000000..ab47dd76cf930 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/listers/quota/v1/clusterresourcequota.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + quotav1 "github.com/openshift/api/quota/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterResourceQuotaLister helps list ClusterResourceQuotas. +// All objects returned here must be treated as read-only. 
+type ClusterResourceQuotaLister interface { + // List lists all ClusterResourceQuotas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*quotav1.ClusterResourceQuota, err error) + // Get retrieves the ClusterResourceQuota from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*quotav1.ClusterResourceQuota, error) + ClusterResourceQuotaListerExpansion +} + +// clusterResourceQuotaLister implements the ClusterResourceQuotaLister interface. +type clusterResourceQuotaLister struct { + listers.ResourceIndexer[*quotav1.ClusterResourceQuota] +} + +// NewClusterResourceQuotaLister returns a new ClusterResourceQuotaLister. +func NewClusterResourceQuotaLister(indexer cache.Indexer) ClusterResourceQuotaLister { + return &clusterResourceQuotaLister{listers.New[*quotav1.ClusterResourceQuota](indexer, quotav1.Resource("clusterresourcequota"))} +} diff --git a/vendor/github.com/openshift/client-go/quota/listers/quota/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/quota/listers/quota/v1/expansion_generated.go new file mode 100644 index 0000000000000..c9ee1cd57d6b8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/quota/listers/quota/v1/expansion_generated.go @@ -0,0 +1,15 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// AppliedClusterResourceQuotaListerExpansion allows custom methods to be added to +// AppliedClusterResourceQuotaLister. +type AppliedClusterResourceQuotaListerExpansion interface{} + +// AppliedClusterResourceQuotaNamespaceListerExpansion allows custom methods to be added to +// AppliedClusterResourceQuotaNamespaceLister. +type AppliedClusterResourceQuotaNamespaceListerExpansion interface{} + +// ClusterResourceQuotaListerExpansion allows custom methods to be added to +// ClusterResourceQuotaLister. +type ClusterResourceQuotaListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..028bb32f03335 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go @@ -0,0 +1,396 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.route.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.route.v1.Route + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.route.v1.RouteSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.route.v1.RouteStatus + default: {} +- name: com.github.openshift.api.route.v1.RouteHTTPHeader + map: + fields: + - name: action + type: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeaderActionUnion + default: {} + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.route.v1.RouteHTTPHeaderActionUnion + map: + fields: + - name: set + type: + namedType: com.github.openshift.api.route.v1.RouteSetHTTPHeader + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: set + discriminatorValue: Set +- name: com.github.openshift.api.route.v1.RouteHTTPHeaderActions + map: + fields: + - name: request + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeader + elementRelationship: associative + keys: + - name + - name: response + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeader + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.route.v1.RouteHTTPHeaders + map: + fields: + - name: actions + type: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeaderActions + default: {} +- name: com.github.openshift.api.route.v1.RouteIngress + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteIngressCondition + elementRelationship: associative + keys: + - type + - name: host + type: + scalar: string + - name: routerCanonicalHostname + type: + scalar: string + - name: routerName + type: + scalar: string + - name: wildcardPolicy + type: + scalar: string +- name: com.github.openshift.api.route.v1.RouteIngressCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.route.v1.RoutePort + map: + fields: + - name: targetPort + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: com.github.openshift.api.route.v1.RouteSetHTTPHeader + map: + fields: + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.route.v1.RouteSpec + map: + fields: + - name: alternateBackends + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteTargetReference + elementRelationship: associative + keys: + - name + - kind + 
- name: host + type: + scalar: string + - name: httpHeaders + type: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeaders + - name: path + type: + scalar: string + - name: port + type: + namedType: com.github.openshift.api.route.v1.RoutePort + - name: subdomain + type: + scalar: string + - name: tls + type: + namedType: com.github.openshift.api.route.v1.TLSConfig + - name: to + type: + namedType: com.github.openshift.api.route.v1.RouteTargetReference + default: {} + - name: wildcardPolicy + type: + scalar: string +- name: com.github.openshift.api.route.v1.RouteStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteIngress + elementRelationship: atomic +- name: com.github.openshift.api.route.v1.RouteTargetReference + map: + fields: + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: weight + type: + scalar: numeric +- name: com.github.openshift.api.route.v1.TLSConfig + map: + fields: + - name: caCertificate + type: + scalar: string + - name: certificate + type: + scalar: string + - name: destinationCACertificate + type: + scalar: string + - name: externalCertificate + type: + namedType: com.github.openshift.api.route.v1.LocalObjectReference + - name: insecureEdgeTerminationPolicy + type: + scalar: string + - name: key + type: + scalar: string + - name: termination + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: 
blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.util.intstr.IntOrString + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/localobjectreference.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/localobjectreference.go new file mode 100644 index 0000000000000..c0b6f455e9752 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/localobjectreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use +// with apply. +type LocalObjectReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with +// apply. +func LocalObjectReference() *LocalObjectReferenceApplyConfiguration { + return &LocalObjectReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *LocalObjectReferenceApplyConfiguration) WithName(value string) *LocalObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go new file mode 100644 index 0000000000000..b962647b8f648 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" + internal "github.com/openshift/client-go/route/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RouteApplyConfiguration represents a declarative configuration of the Route type for use +// with apply. +type RouteApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *RouteSpecApplyConfiguration `json:"spec,omitempty"` + Status *RouteStatusApplyConfiguration `json:"status,omitempty"` +} + +// Route constructs a declarative configuration of the Route type for use with +// apply. 
+func Route(name, namespace string) *RouteApplyConfiguration { + b := &RouteApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Route") + b.WithAPIVersion("route.openshift.io/v1") + return b +} + +// ExtractRoute extracts the applied configuration owned by fieldManager from +// route. If no managedFields are found in route for fieldManager, a +// RouteApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// route must be an unmodified Route API object that was retrieved from the Kubernetes API. +// ExtractRoute provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractRoute(route *routev1.Route, fieldManager string) (*RouteApplyConfiguration, error) { + return extractRoute(route, fieldManager, "") +} + +// ExtractRouteStatus is the same as ExtractRoute except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRouteStatus(route *routev1.Route, fieldManager string) (*RouteApplyConfiguration, error) { + return extractRoute(route, fieldManager, "status") +} + +func extractRoute(route *routev1.Route, fieldManager string, subresource string) (*RouteApplyConfiguration, error) { + b := &RouteApplyConfiguration{} + err := managedfields.ExtractInto(route, internal.Parser().Type("com.github.openshift.api.route.v1.Route"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(route.Name) + b.WithNamespace(route.Namespace) + + b.WithKind("Route") + b.WithAPIVersion("route.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithKind(value string) *RouteApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithAPIVersion(value string) *RouteApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call.
+func (b *RouteApplyConfiguration) WithName(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithGenerateName(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithNamespace(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithUID(value types.UID) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithResourceVersion(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithGeneration(value int64) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *RouteApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key. +func (b *RouteApplyConfiguration) WithLabels(entries map[string]string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *RouteApplyConfiguration) WithAnnotations(entries map[string]string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *RouteApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *RouteApplyConfiguration) WithFinalizers(values ...string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *RouteApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithSpec(value *RouteSpecApplyConfiguration) *RouteApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithStatus(value *RouteStatusApplyConfiguration) *RouteApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RouteApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheader.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheader.go new file mode 100644 index 0000000000000..6223a38a14e4c --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheader.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RouteHTTPHeaderApplyConfiguration represents a declarative configuration of the RouteHTTPHeader type for use +// with apply. +type RouteHTTPHeaderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Action *RouteHTTPHeaderActionUnionApplyConfiguration `json:"action,omitempty"` +} + +// RouteHTTPHeaderApplyConfiguration constructs a declarative configuration of the RouteHTTPHeader type for use with +// apply. +func RouteHTTPHeader() *RouteHTTPHeaderApplyConfiguration { + return &RouteHTTPHeaderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RouteHTTPHeaderApplyConfiguration) WithName(value string) *RouteHTTPHeaderApplyConfiguration { + b.Name = &value + return b +} + +// WithAction sets the Action field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Action field is set to the value of the last call. 
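To make the builder contract above concrete, here is a minimal usage sketch of the chaining pattern. It assumes the generated `Route(name, namespace string)` constructor that applyconfiguration-gen emits near the top of this file (not shown in this hunk); all names and values are illustrative placeholders.

```go
package main

import (
	"fmt"

	routeapply "github.com/openshift/client-go/route/applyconfigurations/route/v1"
)

func main() {
	// Each With* call sets one field (or merges map entries) on the
	// embedded ObjectMeta apply configuration and returns the receiver,
	// so the whole declarative object is assembled in a single chain.
	cfg := routeapply.Route("my-route", "demo").
		WithLabels(map[string]string{"app": "demo"}).
		WithAnnotations(map[string]string{"team": "platform"}).
		WithFinalizers("example.com/cleanup")

	fmt.Println(*cfg.GetName()) // prints "my-route"
}
```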
+func (b *RouteHTTPHeaderApplyConfiguration) WithAction(value *RouteHTTPHeaderActionUnionApplyConfiguration) *RouteHTTPHeaderApplyConfiguration {
+	b.Action = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactions.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactions.go
new file mode 100644
index 0000000000000..2a9f4af162150
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactions.go
@@ -0,0 +1,42 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// RouteHTTPHeaderActionsApplyConfiguration represents a declarative configuration of the RouteHTTPHeaderActions type for use
+// with apply.
+type RouteHTTPHeaderActionsApplyConfiguration struct {
+	Response []RouteHTTPHeaderApplyConfiguration `json:"response,omitempty"`
+	Request  []RouteHTTPHeaderApplyConfiguration `json:"request,omitempty"`
+}
+
+// RouteHTTPHeaderActionsApplyConfiguration constructs a declarative configuration of the RouteHTTPHeaderActions type for use with
+// apply.
+func RouteHTTPHeaderActions() *RouteHTTPHeaderActionsApplyConfiguration {
+	return &RouteHTTPHeaderActionsApplyConfiguration{}
+}
+
+// WithResponse adds the given value to the Response field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Response field.
+func (b *RouteHTTPHeaderActionsApplyConfiguration) WithResponse(values ...*RouteHTTPHeaderApplyConfiguration) *RouteHTTPHeaderActionsApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithResponse")
+		}
+		b.Response = append(b.Response, *values[i])
+	}
+	return b
+}
+
+// WithRequest adds the given value to the Request field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Request field.
+func (b *RouteHTTPHeaderActionsApplyConfiguration) WithRequest(values ...*RouteHTTPHeaderApplyConfiguration) *RouteHTTPHeaderActionsApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithRequest")
+		}
+		b.Request = append(b.Request, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactionunion.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactionunion.go
new file mode 100644
index 0000000000000..a54a0913cc3e0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactionunion.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	routev1 "github.com/openshift/api/route/v1"
+)
+
+// RouteHTTPHeaderActionUnionApplyConfiguration represents a declarative configuration of the RouteHTTPHeaderActionUnion type for use
+// with apply.
+type RouteHTTPHeaderActionUnionApplyConfiguration struct {
+	Type *routev1.RouteHTTPHeaderActionType    `json:"type,omitempty"`
+	Set  *RouteSetHTTPHeaderApplyConfiguration `json:"set,omitempty"`
+}
+
+// RouteHTTPHeaderActionUnionApplyConfiguration constructs a declarative configuration of the RouteHTTPHeaderActionUnion type for use with
+// apply.
+func RouteHTTPHeaderActionUnion() *RouteHTTPHeaderActionUnionApplyConfiguration { + return &RouteHTTPHeaderActionUnionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *RouteHTTPHeaderActionUnionApplyConfiguration) WithType(value routev1.RouteHTTPHeaderActionType) *RouteHTTPHeaderActionUnionApplyConfiguration { + b.Type = &value + return b +} + +// WithSet sets the Set field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Set field is set to the value of the last call. +func (b *RouteHTTPHeaderActionUnionApplyConfiguration) WithSet(value *RouteSetHTTPHeaderApplyConfiguration) *RouteHTTPHeaderActionUnionApplyConfiguration { + b.Set = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaders.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaders.go new file mode 100644 index 0000000000000..0dd34776a536c --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaders.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RouteHTTPHeadersApplyConfiguration represents a declarative configuration of the RouteHTTPHeaders type for use +// with apply. +type RouteHTTPHeadersApplyConfiguration struct { + Actions *RouteHTTPHeaderActionsApplyConfiguration `json:"actions,omitempty"` +} + +// RouteHTTPHeadersApplyConfiguration constructs a declarative configuration of the RouteHTTPHeaders type for use with +// apply. +func RouteHTTPHeaders() *RouteHTTPHeadersApplyConfiguration { + return &RouteHTTPHeadersApplyConfiguration{} +} + +// WithActions sets the Actions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Actions field is set to the value of the last call. +func (b *RouteHTTPHeadersApplyConfiguration) WithActions(value *RouteHTTPHeaderActionsApplyConfiguration) *RouteHTTPHeadersApplyConfiguration { + b.Actions = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go new file mode 100644 index 0000000000000..2468d1dd51506 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" +) + +// RouteIngressApplyConfiguration represents a declarative configuration of the RouteIngress type for use +// with apply. 
+type RouteIngressApplyConfiguration struct {
+	Host                    *string                                   `json:"host,omitempty"`
+	RouterName              *string                                   `json:"routerName,omitempty"`
+	Conditions              []RouteIngressConditionApplyConfiguration `json:"conditions,omitempty"`
+	WildcardPolicy          *routev1.WildcardPolicyType               `json:"wildcardPolicy,omitempty"`
+	RouterCanonicalHostname *string                                   `json:"routerCanonicalHostname,omitempty"`
+}
+
+// RouteIngressApplyConfiguration constructs a declarative configuration of the RouteIngress type for use with
+// apply.
+func RouteIngress() *RouteIngressApplyConfiguration {
+	return &RouteIngressApplyConfiguration{}
+}
+
+// WithHost sets the Host field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Host field is set to the value of the last call.
+func (b *RouteIngressApplyConfiguration) WithHost(value string) *RouteIngressApplyConfiguration {
+	b.Host = &value
+	return b
+}
+
+// WithRouterName sets the RouterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RouterName field is set to the value of the last call.
+func (b *RouteIngressApplyConfiguration) WithRouterName(value string) *RouteIngressApplyConfiguration {
+	b.RouterName = &value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *RouteIngressApplyConfiguration) WithConditions(values ...*RouteIngressConditionApplyConfiguration) *RouteIngressApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithWildcardPolicy sets the WildcardPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WildcardPolicy field is set to the value of the last call.
+func (b *RouteIngressApplyConfiguration) WithWildcardPolicy(value routev1.WildcardPolicyType) *RouteIngressApplyConfiguration {
+	b.WildcardPolicy = &value
+	return b
+}
+
+// WithRouterCanonicalHostname sets the RouterCanonicalHostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RouterCanonicalHostname field is set to the value of the last call.
+func (b *RouteIngressApplyConfiguration) WithRouterCanonicalHostname(value string) *RouteIngressApplyConfiguration {
+	b.RouterCanonicalHostname = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go
new file mode 100644
index 0000000000000..1ddebe528d241
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go
@@ -0,0 +1,65 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+ +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// RouteIngressConditionApplyConfiguration represents a declarative configuration of the RouteIngressCondition type for use +// with apply. +type RouteIngressConditionApplyConfiguration struct { + Type *routev1.RouteIngressConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// RouteIngressConditionApplyConfiguration constructs a declarative configuration of the RouteIngressCondition type for use with +// apply. +func RouteIngressCondition() *RouteIngressConditionApplyConfiguration { + return &RouteIngressConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithType(value routev1.RouteIngressConditionType) *RouteIngressConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *RouteIngressConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithReason(value string) *RouteIngressConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithMessage(value string) *RouteIngressConditionApplyConfiguration { + b.Message = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *RouteIngressConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *RouteIngressConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go new file mode 100644 index 0000000000000..d26e4564cb041 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RoutePortApplyConfiguration represents a declarative configuration of the RoutePort type for use +// with apply. +type RoutePortApplyConfiguration struct { + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` +} + +// RoutePortApplyConfiguration constructs a declarative configuration of the RoutePort type for use with +// apply. +func RoutePort() *RoutePortApplyConfiguration { + return &RoutePortApplyConfiguration{} +} + +// WithTargetPort sets the TargetPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPort field is set to the value of the last call. +func (b *RoutePortApplyConfiguration) WithTargetPort(value intstr.IntOrString) *RoutePortApplyConfiguration { + b.TargetPort = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routesethttpheader.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routesethttpheader.go new file mode 100644 index 0000000000000..cc1438e9ed1bc --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routesethttpheader.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RouteSetHTTPHeaderApplyConfiguration represents a declarative configuration of the RouteSetHTTPHeader type for use +// with apply. +type RouteSetHTTPHeaderApplyConfiguration struct { + Value *string `json:"value,omitempty"` +} + +// RouteSetHTTPHeaderApplyConfiguration constructs a declarative configuration of the RouteSetHTTPHeader type for use with +// apply. +func RouteSetHTTPHeader() *RouteSetHTTPHeaderApplyConfiguration { + return &RouteSetHTTPHeaderApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *RouteSetHTTPHeaderApplyConfiguration) WithValue(value string) *RouteSetHTTPHeaderApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go new file mode 100644 index 0000000000000..09b6fd421f9ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go @@ -0,0 +1,104 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" +) + +// RouteSpecApplyConfiguration represents a declarative configuration of the RouteSpec type for use +// with apply. 
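Taken together, the header builders above mirror the nested RouteHTTPHeaders structure: a union whose Type selects the action and whose Set payload is only meaningful for a "Set" action. A hedged composition sketch follows; it assumes the `Set` constant of `routev1.RouteHTTPHeaderActionType` in github.com/openshift/api/route/v1, and the header name and value are placeholders.

```go
package demo

import (
	routev1 "github.com/openshift/api/route/v1"
	routeapply "github.com/openshift/client-go/route/applyconfigurations/route/v1"
)

// buildHeaderActions composes one response-header rule: Type selects the
// action, and Set carries the value only when Type is "Set".
func buildHeaderActions() *routeapply.RouteHTTPHeadersApplyConfiguration {
	return routeapply.RouteHTTPHeaders().
		WithActions(routeapply.RouteHTTPHeaderActions().
			WithResponse(routeapply.RouteHTTPHeader().
				WithName("X-Frame-Options").
				WithAction(routeapply.RouteHTTPHeaderActionUnion().
					WithType(routev1.Set).
					WithSet(routeapply.RouteSetHTTPHeader().WithValue("DENY")))))
}
```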
+type RouteSpecApplyConfiguration struct {
+	Host              *string                                  `json:"host,omitempty"`
+	Subdomain         *string                                  `json:"subdomain,omitempty"`
+	Path              *string                                  `json:"path,omitempty"`
+	To                *RouteTargetReferenceApplyConfiguration  `json:"to,omitempty"`
+	AlternateBackends []RouteTargetReferenceApplyConfiguration `json:"alternateBackends,omitempty"`
+	Port              *RoutePortApplyConfiguration             `json:"port,omitempty"`
+	TLS               *TLSConfigApplyConfiguration             `json:"tls,omitempty"`
+	WildcardPolicy    *routev1.WildcardPolicyType              `json:"wildcardPolicy,omitempty"`
+	HTTPHeaders       *RouteHTTPHeadersApplyConfiguration      `json:"httpHeaders,omitempty"`
+}
+
+// RouteSpecApplyConfiguration constructs a declarative configuration of the RouteSpec type for use with
+// apply.
+func RouteSpec() *RouteSpecApplyConfiguration {
+	return &RouteSpecApplyConfiguration{}
+}
+
+// WithHost sets the Host field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Host field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithHost(value string) *RouteSpecApplyConfiguration {
+	b.Host = &value
+	return b
+}
+
+// WithSubdomain sets the Subdomain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Subdomain field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithSubdomain(value string) *RouteSpecApplyConfiguration {
+	b.Subdomain = &value
+	return b
+}
+
+// WithPath sets the Path field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Path field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithPath(value string) *RouteSpecApplyConfiguration {
+	b.Path = &value
+	return b
+}
+
+// WithTo sets the To field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the To field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithTo(value *RouteTargetReferenceApplyConfiguration) *RouteSpecApplyConfiguration {
+	b.To = value
+	return b
+}
+
+// WithAlternateBackends adds the given value to the AlternateBackends field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AlternateBackends field.
+func (b *RouteSpecApplyConfiguration) WithAlternateBackends(values ...*RouteTargetReferenceApplyConfiguration) *RouteSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithAlternateBackends")
+		}
+		b.AlternateBackends = append(b.AlternateBackends, *values[i])
+	}
+	return b
+}
+
+// WithPort sets the Port field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Port field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithPort(value *RoutePortApplyConfiguration) *RouteSpecApplyConfiguration {
+	b.Port = value
+	return b
+}
+
+// WithTLS sets the TLS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLS field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithTLS(value *TLSConfigApplyConfiguration) *RouteSpecApplyConfiguration {
+	b.TLS = value
+	return b
+}
+
+// WithWildcardPolicy sets the WildcardPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WildcardPolicy field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithWildcardPolicy(value routev1.WildcardPolicyType) *RouteSpecApplyConfiguration {
+	b.WildcardPolicy = &value
+	return b
+}
+
+// WithHTTPHeaders sets the HTTPHeaders field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTTPHeaders field is set to the value of the last call.
+func (b *RouteSpecApplyConfiguration) WithHTTPHeaders(value *RouteHTTPHeadersApplyConfiguration) *RouteSpecApplyConfiguration {
+	b.HTTPHeaders = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go
new file mode 100644
index 0000000000000..c4f5881c3b29a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// RouteStatusApplyConfiguration represents a declarative configuration of the RouteStatus type for use
+// with apply.
+type RouteStatusApplyConfiguration struct {
+	Ingress []RouteIngressApplyConfiguration `json:"ingress,omitempty"`
+}
+
+// RouteStatusApplyConfiguration constructs a declarative configuration of the RouteStatus type for use with
+// apply.
+func RouteStatus() *RouteStatusApplyConfiguration {
+	return &RouteStatusApplyConfiguration{}
+}
+
+// WithIngress adds the given value to the Ingress field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Ingress field.
+func (b *RouteStatusApplyConfiguration) WithIngress(values ...*RouteIngressApplyConfiguration) *RouteStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithIngress")
+		}
+		b.Ingress = append(b.Ingress, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go
new file mode 100644
index 0000000000000..3521a17e230e3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+ +package v1 + +// RouteTargetReferenceApplyConfiguration represents a declarative configuration of the RouteTargetReference type for use +// with apply. +type RouteTargetReferenceApplyConfiguration struct { + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` + Weight *int32 `json:"weight,omitempty"` +} + +// RouteTargetReferenceApplyConfiguration constructs a declarative configuration of the RouteTargetReference type for use with +// apply. +func RouteTargetReference() *RouteTargetReferenceApplyConfiguration { + return &RouteTargetReferenceApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RouteTargetReferenceApplyConfiguration) WithKind(value string) *RouteTargetReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RouteTargetReferenceApplyConfiguration) WithName(value string) *RouteTargetReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithWeight sets the Weight field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Weight field is set to the value of the last call. +func (b *RouteTargetReferenceApplyConfiguration) WithWeight(value int32) *RouteTargetReferenceApplyConfiguration { + b.Weight = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go new file mode 100644 index 0000000000000..296c4efc9bed4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go @@ -0,0 +1,81 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" +) + +// TLSConfigApplyConfiguration represents a declarative configuration of the TLSConfig type for use +// with apply. +type TLSConfigApplyConfiguration struct { + Termination *routev1.TLSTerminationType `json:"termination,omitempty"` + Certificate *string `json:"certificate,omitempty"` + Key *string `json:"key,omitempty"` + CACertificate *string `json:"caCertificate,omitempty"` + DestinationCACertificate *string `json:"destinationCACertificate,omitempty"` + InsecureEdgeTerminationPolicy *routev1.InsecureEdgeTerminationPolicyType `json:"insecureEdgeTerminationPolicy,omitempty"` + ExternalCertificate *LocalObjectReferenceApplyConfiguration `json:"externalCertificate,omitempty"` +} + +// TLSConfigApplyConfiguration constructs a declarative configuration of the TLSConfig type for use with +// apply. +func TLSConfig() *TLSConfigApplyConfiguration { + return &TLSConfigApplyConfiguration{} +} + +// WithTermination sets the Termination field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Termination field is set to the value of the last call. 
+func (b *TLSConfigApplyConfiguration) WithTermination(value routev1.TLSTerminationType) *TLSConfigApplyConfiguration { + b.Termination = &value + return b +} + +// WithCertificate sets the Certificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Certificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCertificate(value string) *TLSConfigApplyConfiguration { + b.Certificate = &value + return b +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithKey(value string) *TLSConfigApplyConfiguration { + b.Key = &value + return b +} + +// WithCACertificate sets the CACertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CACertificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCACertificate(value string) *TLSConfigApplyConfiguration { + b.CACertificate = &value + return b +} + +// WithDestinationCACertificate sets the DestinationCACertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationCACertificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithDestinationCACertificate(value string) *TLSConfigApplyConfiguration { + b.DestinationCACertificate = &value + return b +} + +// WithInsecureEdgeTerminationPolicy sets the InsecureEdgeTerminationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InsecureEdgeTerminationPolicy field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithInsecureEdgeTerminationPolicy(value routev1.InsecureEdgeTerminationPolicyType) *TLSConfigApplyConfiguration { + b.InsecureEdgeTerminationPolicy = &value + return b +} + +// WithExternalCertificate sets the ExternalCertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalCertificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithExternalCertificate(value *LocalObjectReferenceApplyConfiguration) *TLSConfigApplyConfiguration { + b.ExternalCertificate = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..e81ff98b38bfc --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. 
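With the spec, target-reference, port, and TLS builders all in place, a hedged sketch of composing a typical edge-terminated route spec follows. It relies on the `TLSTerminationEdge` and `InsecureEdgeTerminationPolicyRedirect` constants from github.com/openshift/api/route/v1; the host, service name, and port name are placeholders.

```go
package demo

import (
	routev1 "github.com/openshift/api/route/v1"
	routeapply "github.com/openshift/client-go/route/applyconfigurations/route/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// edgeRouteSpec routes traffic for a host to the "frontend" service's
// named "http" port, terminating TLS at the router and redirecting
// insecure requests.
func edgeRouteSpec() *routeapply.RouteSpecApplyConfiguration {
	return routeapply.RouteSpec().
		WithHost("app.example.com").
		WithTo(routeapply.RouteTargetReference().
			WithKind("Service").
			WithName("frontend").
			WithWeight(100)).
		WithPort(routeapply.RoutePort().
			WithTargetPort(intstr.FromString("http"))).
		WithTLS(routeapply.TLSConfig().
			WithTermination(routev1.TLSTerminationEdge).
			WithInsecureEdgeTerminationPolicy(routev1.InsecureEdgeTerminationPolicyRedirect))
}
```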
+ +package versioned + +import ( + fmt "fmt" + http "net/http" + + routev1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + RouteV1() routev1.RouteV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + routeV1 *routev1.RouteV1Client +} + +// RouteV1 retrieves the RouteV1Client +func (c *Clientset) RouteV1() routev1.RouteV1Interface { + return c.routeV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.routeV1, err = routev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.routeV1 = routev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..53ac82ff5d298 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + routev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/doc.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/generated_expansion.go new file mode 100644 index 0000000000000..4f2173b6fc52a --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. 
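For orientation, a hedged sketch of wiring this clientset up outside a cluster: the kubeconfig path, namespace, and route name are placeholders, and error handling is reduced to log.Fatal.

```go
package main

import (
	"context"
	"fmt"
	"log"

	versioned "github.com/openshift/client-go/route/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (path is a placeholder).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}

	// NewForConfig shares one HTTP transport across all group clients.
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	route, err := cs.RouteV1().Routes("demo").Get(context.TODO(), "my-route", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(route.Spec.Host)
}
```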
+ +package v1 + +type RouteExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route.go new file mode 100644 index 0000000000000..fa11e4aa1491d --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + routev1 "github.com/openshift/api/route/v1" + applyconfigurationsroutev1 "github.com/openshift/client-go/route/applyconfigurations/route/v1" + scheme "github.com/openshift/client-go/route/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RoutesGetter has a method to return a RouteInterface. +// A group's client should implement this interface. +type RoutesGetter interface { + Routes(namespace string) RouteInterface +} + +// RouteInterface has methods to work with Route resources. +type RouteInterface interface { + Create(ctx context.Context, route *routev1.Route, opts metav1.CreateOptions) (*routev1.Route, error) + Update(ctx context.Context, route *routev1.Route, opts metav1.UpdateOptions) (*routev1.Route, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, route *routev1.Route, opts metav1.UpdateOptions) (*routev1.Route, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*routev1.Route, error) + List(ctx context.Context, opts metav1.ListOptions) (*routev1.RouteList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *routev1.Route, err error) + Apply(ctx context.Context, route *applyconfigurationsroutev1.RouteApplyConfiguration, opts metav1.ApplyOptions) (result *routev1.Route, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, route *applyconfigurationsroutev1.RouteApplyConfiguration, opts metav1.ApplyOptions) (result *routev1.Route, err error) + RouteExpansion +} + +// routes implements RouteInterface +type routes struct { + *gentype.ClientWithListAndApply[*routev1.Route, *routev1.RouteList, *applyconfigurationsroutev1.RouteApplyConfiguration] +} + +// newRoutes returns a Routes +func newRoutes(c *RouteV1Client, namespace string) *routes { + return &routes{ + gentype.NewClientWithListAndApply[*routev1.Route, *routev1.RouteList, *applyconfigurationsroutev1.RouteApplyConfiguration]( + "routes", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *routev1.Route { return &routev1.Route{} }, + func() *routev1.RouteList { return &routev1.RouteList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route_client.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route_client.go new file mode 100644 index 0000000000000..44da893cdb20f --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route_client.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + routev1 "github.com/openshift/api/route/v1" + scheme "github.com/openshift/client-go/route/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type RouteV1Interface interface { + RESTClient() rest.Interface + RoutesGetter +} + +// RouteV1Client is used to interact with features provided by the route.openshift.io group. +type RouteV1Client struct { + restClient rest.Interface +} + +func (c *RouteV1Client) Routes(namespace string) RouteInterface { + return newRoutes(c, namespace) +} + +// NewForConfig creates a new RouteV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*RouteV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RouteV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RouteV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &RouteV1Client{client}, nil +} + +// NewForConfigOrDie creates a new RouteV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RouteV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RouteV1Client for the given RESTClient. 
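Since the interface above exposes Apply, here is a hedged sketch of server-side apply with the generated apply configurations; the field manager identity, namespace, and object names are placeholders.

```go
package demo

import (
	"context"

	routeapply "github.com/openshift/client-go/route/applyconfigurations/route/v1"
	routeclientv1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyRoute declares the desired state and lets the API server merge it,
// with ownership of the declared fields keyed to FieldManager.
func applyRoute(ctx context.Context, client routeclientv1.RouteV1Interface) error {
	desired := routeapply.Route("my-route", "demo").
		WithSpec(routeapply.RouteSpec().
			WithTo(routeapply.RouteTargetReference().
				WithKind("Service").
				WithName("frontend")))

	_, err := client.Routes("demo").Apply(ctx, desired, metav1.ApplyOptions{
		FieldManager: "example-controller", // placeholder identity
		Force:        true,
	})
	return err
}
```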
+func New(c rest.Interface) *RouteV1Client { + return &RouteV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := routev1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RouteV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/factory.go new file mode 100644 index 0000000000000..8d23ff72b49d4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/route/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces" + route "github.com/openshift/client-go/route/informers/externalversions/route" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. 
+func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	defer cancel()           // Deferred last, so it runs first and unblocks Shutdown.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//		if !ok {
+//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//			return
+//		}
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	// Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches have synced
+	// or the stop channel gets closed.
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+	Route() route.Interface
+}
+
+func (f *sharedInformerFactory) Route() route.Interface {
+	return route.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..1615e24cd1984
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/generic.go
@@ -0,0 +1,46 @@
+// Code generated by informer-gen. DO NOT EDIT.
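The factory doc comment above spells out the intended lifecycle; the sketch below expands it into a compilable shape. The clientset parameter, resync period, and handler body are illustrative, and the route informer accessors come from the group/version interfaces generated further down in this diff.

```go
package demo

import (
	"fmt"
	"os"
	"time"

	versioned "github.com/openshift/client-go/route/clientset/versioned"
	externalversions "github.com/openshift/client-go/route/informers/externalversions"
	"k8s.io/client-go/tools/cache"
)

// runRouteInformer wires the lifecycle together: register an informer,
// start it, wait for its cache, then shut down once stopCh closes.
func runRouteInformer(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)

	// Requesting the informer registers it with the factory; Start then
	// runs everything registered so far in background goroutines.
	factory.Route().V1().Routes().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("route added") },
	})
	factory.Start(stopCh)

	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			fmt.Fprintf(os.Stderr, "cache failed to sync: %v\n", typ)
			return
		}
	}

	<-stopCh           // run until the caller signals shutdown...
	factory.Shutdown() // ...then wait for all informer goroutines to exit
}
```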
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/route/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
+// sharedInformers based on type.
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=route.openshift.io, Version=v1
+	case v1.SchemeGroupVersion.WithResource("routes"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Route().V1().Routes().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000000..1f807bab67f84
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,24 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/openshift/client-go/route/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle.
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/route/interface.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/interface.go
new file mode 100644
index 0000000000000..69e1be333c3fe
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/interface.go
@@ -0,0 +1,30 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package route
+
+import (
+	internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces"
+	v1 "github.com/openshift/client-go/route/informers/externalversions/route/v1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+	// V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/interface.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/interface.go new file mode 100644 index 0000000000000..63ee15aecfdb5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // Routes returns a RouteInformer.
+ Routes() RouteInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Routes returns a RouteInformer.
+func (v *version) Routes() RouteInformer {
+ return &routeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/route.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/route.go new file mode 100644 index 0000000000000..f437983ff84a3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/route.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+ time "time"
+
+ apiroutev1 "github.com/openshift/api/route/v1"
+ versioned "github.com/openshift/client-go/route/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces"
+ routev1 "github.com/openshift/client-go/route/listers/route/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// RouteInformer provides access to a shared informer and lister for
+// Routes.
+type RouteInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() routev1.RouteLister
+}
+
+type routeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewRouteInformer constructs a new informer for the Route type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
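+//
+// A hypothetical filtering sketch (the namespace and selector below are
+// illustrative only): server-side filtering can be layered on by passing a
+// TweakListOptionsFunc to NewFilteredRouteInformer, e.g.
+//
+// informer := NewFilteredRouteInformer(client, "openshift-ingress", 30*time.Second,
+// cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+// func(options *metav1.ListOptions) {
+// options.LabelSelector = "app=router" // assumed example selector
+// })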
+func NewRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredRouteInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredRouteInformer constructs a new informer for the Route type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.RouteV1().Routes(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.RouteV1().Routes(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &apiroutev1.Route{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *routeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredRouteInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *routeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&apiroutev1.Route{}, f.defaultInformer)
+}
+
+func (f *routeInformer) Lister() routev1.RouteLister {
+ return routev1.NewRouteLister(f.Informer().GetIndexer())
+} diff --git a/vendor/github.com/openshift/client-go/route/listers/route/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/route/listers/route/v1/expansion_generated.go new file mode 100644 index 0000000000000..74feb63800a13 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/listers/route/v1/expansion_generated.go @@ -0,0 +1,11 @@ +// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// RouteListerExpansion allows custom methods to be added to
+// RouteLister.
+type RouteListerExpansion interface{}
+
+// RouteNamespaceListerExpansion allows custom methods to be added to
+// RouteNamespaceLister.
+type RouteNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/route/listers/route/v1/route.go b/vendor/github.com/openshift/client-go/route/listers/route/v1/route.go new file mode 100644 index 0000000000000..abe1b0efd50cd --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/listers/route/v1/route.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ routev1 "github.com/openshift/api/route/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ listers "k8s.io/client-go/listers"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// RouteLister helps list Routes.
+// All objects returned here must be treated as read-only.
+type RouteLister interface {
+ // List lists all Routes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*routev1.Route, err error)
+ // Routes returns an object that can list and get Routes.
+ Routes(namespace string) RouteNamespaceLister
+ RouteListerExpansion
+}
+
+// routeLister implements the RouteLister interface.
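+//
+// A minimal read sketch, assuming a started factory whose caches have synced
+// (the namespace and name are illustrative only):
+//
+// lister := factory.Route().V1().Routes().Lister()
+// route, err := lister.Routes("my-namespace").Get("my-route")
+//
+// Objects returned by the lister come straight from the shared cache and must
+// be treated as read-only; deep-copy them before mutating.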
+type routeLister struct { + listers.ResourceIndexer[*routev1.Route] +} + +// NewRouteLister returns a new RouteLister. +func NewRouteLister(indexer cache.Indexer) RouteLister { + return &routeLister{listers.New[*routev1.Route](indexer, routev1.Resource("route"))} +} + +// Routes returns an object that can list and get Routes. +func (s *routeLister) Routes(namespace string) RouteNamespaceLister { + return routeNamespaceLister{listers.NewNamespaced[*routev1.Route](s.ResourceIndexer, namespace)} +} + +// RouteNamespaceLister helps list and get Routes. +// All objects returned here must be treated as read-only. +type RouteNamespaceLister interface { + // List lists all Routes in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*routev1.Route, err error) + // Get retrieves the Route from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*routev1.Route, error) + RouteNamespaceListerExpansion +} + +// routeNamespaceLister implements the RouteNamespaceLister +// interface. +type routeNamespaceLister struct { + listers.ResourceIndexer[*routev1.Route] +} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..995779a57d143 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/internal/internal.go @@ -0,0 +1,402 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.security.v1.AllowedFlexVolume + map: + fields: + - name: driver + type: + scalar: string + default: "" +- name: com.github.openshift.api.security.v1.FSGroupStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: com.github.openshift.api.security.v1.IDRange + elementRelationship: atomic + - name: type + type: + scalar: string +- name: com.github.openshift.api.security.v1.IDRange + map: + fields: + - name: max + type: + scalar: numeric + - name: min + type: + scalar: numeric +- name: com.github.openshift.api.security.v1.RangeAllocation + map: + fields: + - name: apiVersion + type: + scalar: string + - name: data + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: range + type: + scalar: string + default: "" +- name: com.github.openshift.api.security.v1.RunAsUserStrategyOptions + map: + fields: + - name: type + type: + scalar: string + - name: uid + type: + scalar: numeric + - name: uidRangeMax + type: + scalar: numeric + - name: uidRangeMin + type: + scalar: numeric +- name: com.github.openshift.api.security.v1.SELinuxContextStrategyOptions + map: + fields: + - name: seLinuxOptions + type: + namedType: io.k8s.api.core.v1.SELinuxOptions + - name: type + type: + scalar: string +- name: com.github.openshift.api.security.v1.SecurityContextConstraints + map: + 
fields: + - name: allowHostDirVolumePlugin + type: + scalar: boolean + default: false + - name: allowHostIPC + type: + scalar: boolean + default: false + - name: allowHostNetwork + type: + scalar: boolean + default: false + - name: allowHostPID + type: + scalar: boolean + default: false + - name: allowHostPorts + type: + scalar: boolean + default: false + - name: allowPrivilegeEscalation + type: + scalar: boolean + - name: allowPrivilegedContainer + type: + scalar: boolean + default: false + - name: allowedCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: allowedFlexVolumes + type: + list: + elementType: + namedType: com.github.openshift.api.security.v1.AllowedFlexVolume + elementRelationship: atomic + - name: allowedUnsafeSysctls + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: apiVersion + type: + scalar: string + - name: defaultAddCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultAllowPrivilegeEscalation + type: + scalar: boolean + - name: forbiddenSysctls + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: fsGroup + type: + namedType: com.github.openshift.api.security.v1.FSGroupStrategyOptions + default: {} + - name: groups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: priority + type: + scalar: numeric + - name: readOnlyRootFilesystem + type: + scalar: boolean + default: false + - name: requiredDropCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: runAsUser + type: + namedType: com.github.openshift.api.security.v1.RunAsUserStrategyOptions + default: {} + - name: seLinuxContext + type: + namedType: com.github.openshift.api.security.v1.SELinuxContextStrategyOptions + default: {} + - name: seccompProfiles + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: supplementalGroups + type: + namedType: com.github.openshift.api.security.v1.SupplementalGroupsStrategyOptions + default: {} + - name: userNamespaceLevel + type: + scalar: string + default: AllowHostLevel + - name: users + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: volumes + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.security.v1.SupplementalGroupsStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: com.github.openshift.api.security.v1.IDRange + elementRelationship: atomic + - name: type + type: + scalar: string +- name: io.k8s.api.core.v1.SELinuxOptions + map: + fields: + - name: level + type: + scalar: string + - name: role + type: + scalar: string + - name: type + type: + scalar: string + - name: user + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/allowedflexvolume.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/allowedflexvolume.go new file mode 100644 index 0000000000000..0d9e9b40aa535 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/allowedflexvolume.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AllowedFlexVolumeApplyConfiguration represents a declarative configuration of the AllowedFlexVolume type for use +// with apply. +type AllowedFlexVolumeApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` +} + +// AllowedFlexVolumeApplyConfiguration constructs a declarative configuration of the AllowedFlexVolume type for use with +// apply. +func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { + return &AllowedFlexVolumeApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Driver field is set to the value of the last call.
+func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration {
+ b.Driver = &value
+ return b
+} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/fsgroupstrategyoptions.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/fsgroupstrategyoptions.go new file mode 100644 index 0000000000000..d9c1dd1a606fe --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/fsgroupstrategyoptions.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ securityv1 "github.com/openshift/api/security/v1"
+)
+
+// FSGroupStrategyOptionsApplyConfiguration represents a declarative configuration of the FSGroupStrategyOptions type for use
+// with apply.
+type FSGroupStrategyOptionsApplyConfiguration struct {
+ Type *securityv1.FSGroupStrategyType `json:"type,omitempty"`
+ Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"`
+}
+
+// FSGroupStrategyOptionsApplyConfiguration constructs a declarative configuration of the FSGroupStrategyOptions type for use with
+// apply.
+func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration {
+ return &FSGroupStrategyOptionsApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *FSGroupStrategyOptionsApplyConfiguration) WithType(value securityv1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithRanges adds the given value to the Ranges field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Ranges field.
+func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRanges")
+ }
+ b.Ranges = append(b.Ranges, *values[i])
+ }
+ return b
+} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/idrange.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/idrange.go new file mode 100644 index 0000000000000..70c20336a0a8b --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/idrange.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// IDRangeApplyConfiguration represents a declarative configuration of the IDRange type for use
+// with apply.
+type IDRangeApplyConfiguration struct {
+ Min *int64 `json:"min,omitempty"`
+ Max *int64 `json:"max,omitempty"`
+}
+
+// IDRangeApplyConfiguration constructs a declarative configuration of the IDRange type for use with
+// apply.
+func IDRange() *IDRangeApplyConfiguration {
+ return &IDRangeApplyConfiguration{}
+}
+
+// WithMin sets the Min field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Min field is set to the value of the last call.
+func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration {
+ b.Min = &value
+ return b
+}
+
+// WithMax sets the Max field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Max field is set to the value of the last call.
+func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration {
+ b.Max = &value
+ return b
+} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/rangeallocation.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/rangeallocation.go new file mode 100644 index 0000000000000..1993bd8d727e5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/rangeallocation.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ securityv1 "github.com/openshift/api/security/v1"
+ internal "github.com/openshift/client-go/security/applyconfigurations/internal"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// RangeAllocationApplyConfiguration represents a declarative configuration of the RangeAllocation type for use
+// with apply.
+type RangeAllocationApplyConfiguration struct {
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Range *string `json:"range,omitempty"`
+ Data []byte `json:"data,omitempty"`
+}
+
+// RangeAllocation constructs a declarative configuration of the RangeAllocation type for use with
+// apply.
+func RangeAllocation(name string) *RangeAllocationApplyConfiguration {
+ b := &RangeAllocationApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("RangeAllocation")
+ b.WithAPIVersion("security.openshift.io/v1")
+ return b
+}
+
+// ExtractRangeAllocation extracts the applied configuration owned by fieldManager from
+// rangeAllocation. If no managedFields are found in rangeAllocation for fieldManager, a
+// RangeAllocationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// rangeAllocation must be an unmodified RangeAllocation API object that was retrieved from the Kubernetes API.
+// ExtractRangeAllocation provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractRangeAllocation(rangeAllocation *securityv1.RangeAllocation, fieldManager string) (*RangeAllocationApplyConfiguration, error) {
+ return extractRangeAllocation(rangeAllocation, fieldManager, "")
+}
+
+// ExtractRangeAllocationStatus is the same as ExtractRangeAllocation except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRangeAllocationStatus(rangeAllocation *securityv1.RangeAllocation, fieldManager string) (*RangeAllocationApplyConfiguration, error) { + return extractRangeAllocation(rangeAllocation, fieldManager, "status") +} + +func extractRangeAllocation(rangeAllocation *securityv1.RangeAllocation, fieldManager string, subresource string) (*RangeAllocationApplyConfiguration, error) { + b := &RangeAllocationApplyConfiguration{} + err := managedfields.ExtractInto(rangeAllocation, internal.Parser().Type("com.github.openshift.api.security.v1.RangeAllocation"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(rangeAllocation.Name) + + b.WithKind("RangeAllocation") + b.WithAPIVersion("security.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *RangeAllocationApplyConfiguration) WithKind(value string) *RangeAllocationApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RangeAllocationApplyConfiguration) WithAPIVersion(value string) *RangeAllocationApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RangeAllocationApplyConfiguration) WithName(value string) *RangeAllocationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *RangeAllocationApplyConfiguration) WithGenerateName(value string) *RangeAllocationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *RangeAllocationApplyConfiguration) WithNamespace(value string) *RangeAllocationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *RangeAllocationApplyConfiguration) WithUID(value types.UID) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *RangeAllocationApplyConfiguration) WithResourceVersion(value string) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *RangeAllocationApplyConfiguration) WithGeneration(value int64) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *RangeAllocationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *RangeAllocationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *RangeAllocationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *RangeAllocationApplyConfiguration) WithLabels(entries map[string]string) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *RangeAllocationApplyConfiguration) WithAnnotations(entries map[string]string) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *RangeAllocationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *RangeAllocationApplyConfiguration) WithFinalizers(values ...string) *RangeAllocationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *RangeAllocationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithRange sets the Range field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Range field is set to the value of the last call.
+func (b *RangeAllocationApplyConfiguration) WithRange(value string) *RangeAllocationApplyConfiguration {
+ b.Range = &value
+ return b
+}
+
+// WithData adds the given value to the Data field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Data field.
+func (b *RangeAllocationApplyConfiguration) WithData(values ...byte) *RangeAllocationApplyConfiguration { + for i := range values { + b.Data = append(b.Data, values[i]) + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RangeAllocationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/runasuserstrategyoptions.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/runasuserstrategyoptions.go new file mode 100644 index 0000000000000..e93f8d6436158 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/runasuserstrategyoptions.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + securityv1 "github.com/openshift/api/security/v1" +) + +// RunAsUserStrategyOptionsApplyConfiguration represents a declarative configuration of the RunAsUserStrategyOptions type for use +// with apply. +type RunAsUserStrategyOptionsApplyConfiguration struct { + Type *securityv1.RunAsUserStrategyType `json:"type,omitempty"` + UID *int64 `json:"uid,omitempty"` + UIDRangeMin *int64 `json:"uidRangeMin,omitempty"` + UIDRangeMax *int64 `json:"uidRangeMax,omitempty"` +} + +// RunAsUserStrategyOptionsApplyConfiguration constructs a declarative configuration of the RunAsUserStrategyOptions type for use with +// apply. +func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { + return &RunAsUserStrategyOptionsApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *RunAsUserStrategyOptionsApplyConfiguration) WithType(value securityv1.RunAsUserStrategyType) *RunAsUserStrategyOptionsApplyConfiguration { + b.Type = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RunAsUserStrategyOptionsApplyConfiguration) WithUID(value int64) *RunAsUserStrategyOptionsApplyConfiguration { + b.UID = &value + return b +} + +// WithUIDRangeMin sets the UIDRangeMin field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UIDRangeMin field is set to the value of the last call. +func (b *RunAsUserStrategyOptionsApplyConfiguration) WithUIDRangeMin(value int64) *RunAsUserStrategyOptionsApplyConfiguration { + b.UIDRangeMin = &value + return b +} + +// WithUIDRangeMax sets the UIDRangeMax field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UIDRangeMax field is set to the value of the last call. 
+func (b *RunAsUserStrategyOptionsApplyConfiguration) WithUIDRangeMax(value int64) *RunAsUserStrategyOptionsApplyConfiguration { + b.UIDRangeMax = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/securitycontextconstraints.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/securitycontextconstraints.go new file mode 100644 index 0000000000000..ecd908713ffce --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/securitycontextconstraints.go @@ -0,0 +1,477 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + securityv1 "github.com/openshift/api/security/v1" + internal "github.com/openshift/client-go/security/applyconfigurations/internal" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// SecurityContextConstraintsApplyConfiguration represents a declarative configuration of the SecurityContextConstraints type for use +// with apply. +type SecurityContextConstraintsApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Priority *int32 `json:"priority,omitempty"` + AllowPrivilegedContainer *bool `json:"allowPrivilegedContainer,omitempty"` + DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities,omitempty"` + RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities,omitempty"` + AllowedCapabilities []corev1.Capability `json:"allowedCapabilities,omitempty"` + AllowHostDirVolumePlugin *bool `json:"allowHostDirVolumePlugin,omitempty"` + Volumes []securityv1.FSType `json:"volumes,omitempty"` + AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` + AllowHostNetwork *bool `json:"allowHostNetwork,omitempty"` + AllowHostPorts *bool `json:"allowHostPorts,omitempty"` + AllowHostPID *bool `json:"allowHostPID,omitempty"` + AllowHostIPC *bool `json:"allowHostIPC,omitempty"` + UserNamespaceLevel *securityv1.NamespaceLevelType `json:"userNamespaceLevel,omitempty"` + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` + SELinuxContext *SELinuxContextStrategyOptionsApplyConfiguration `json:"seLinuxContext,omitempty"` + RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` + SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` + FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` + Users []string `json:"users,omitempty"` + Groups []string `json:"groups,omitempty"` + SeccompProfiles []string `json:"seccompProfiles,omitempty"` + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` +} + +// SecurityContextConstraints constructs a declarative configuration of the SecurityContextConstraints type for use with +// apply. 
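+//
+// A minimal server-side apply sketch (hypothetical: "sccClient" stands in for a
+// generated typed client for security.openshift.io/v1 with an Apply method, and
+// the name, strategy, and user below are illustrative only):
+//
+// scc := SecurityContextConstraints("example-scc").
+// WithAllowPrivilegedContainer(false).
+// WithRunAsUser(RunAsUserStrategyOptions().WithType(securityv1.RunAsUserStrategyMustRunAsRange)).
+// WithUsers("system:serviceaccount:demo:builder")
+// result, err := sccClient.Apply(ctx, scc, apismetav1.ApplyOptions{FieldManager: "my-controller"})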
+func SecurityContextConstraints(name string) *SecurityContextConstraintsApplyConfiguration {
+ b := &SecurityContextConstraintsApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("SecurityContextConstraints")
+ b.WithAPIVersion("security.openshift.io/v1")
+ return b
+}
+
+// ExtractSecurityContextConstraints extracts the applied configuration owned by fieldManager from
+// securityContextConstraints. If no managedFields are found in securityContextConstraints for fieldManager, a
+// SecurityContextConstraintsApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager
+// because other field managers have taken ownership of all the fields previously owned by
+// fieldManager, or because the fieldManager never owned any fields.
+// securityContextConstraints must be an unmodified SecurityContextConstraints API object that was retrieved from the Kubernetes API.
+// ExtractSecurityContextConstraints provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractSecurityContextConstraints(securityContextConstraints *securityv1.SecurityContextConstraints, fieldManager string) (*SecurityContextConstraintsApplyConfiguration, error) {
+ return extractSecurityContextConstraints(securityContextConstraints, fieldManager, "")
+}
+
+// ExtractSecurityContextConstraintsStatus is the same as ExtractSecurityContextConstraints except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractSecurityContextConstraintsStatus(securityContextConstraints *securityv1.SecurityContextConstraints, fieldManager string) (*SecurityContextConstraintsApplyConfiguration, error) {
+ return extractSecurityContextConstraints(securityContextConstraints, fieldManager, "status")
+}
+
+func extractSecurityContextConstraints(securityContextConstraints *securityv1.SecurityContextConstraints, fieldManager string, subresource string) (*SecurityContextConstraintsApplyConfiguration, error) {
+ b := &SecurityContextConstraintsApplyConfiguration{}
+ err := managedfields.ExtractInto(securityContextConstraints, internal.Parser().Type("com.github.openshift.api.security.v1.SecurityContextConstraints"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(securityContextConstraints.Name)
+
+ b.WithKind("SecurityContextConstraints")
+ b.WithAPIVersion("security.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithKind(value string) *SecurityContextConstraintsApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAPIVersion(value string) *SecurityContextConstraintsApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithName(value string) *SecurityContextConstraintsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithGenerateName(value string) *SecurityContextConstraintsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithNamespace(value string) *SecurityContextConstraintsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithUID(value types.UID) *SecurityContextConstraintsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithResourceVersion(value string) *SecurityContextConstraintsApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *SecurityContextConstraintsApplyConfiguration) WithGeneration(value int64) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *SecurityContextConstraintsApplyConfiguration) WithLabels(entries map[string]string) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAnnotations(entries map[string]string) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithFinalizers(values ...string) *SecurityContextConstraintsApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *SecurityContextConstraintsApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithPriority sets the Priority field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Priority field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithPriority(value int32) *SecurityContextConstraintsApplyConfiguration {
+ b.Priority = &value
+ return b
+}
+
+// WithAllowPrivilegedContainer sets the AllowPrivilegedContainer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllowPrivilegedContainer field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAllowPrivilegedContainer(value bool) *SecurityContextConstraintsApplyConfiguration {
+ b.AllowPrivilegedContainer = &value
+ return b
+}
+
+// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithDefaultAddCapabilities(values ...corev1.Capability) *SecurityContextConstraintsApplyConfiguration {
+ for i := range values {
+ b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i])
+ }
+ return b
+}
+
+// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithRequiredDropCapabilities(values ...corev1.Capability) *SecurityContextConstraintsApplyConfiguration {
+ for i := range values {
+ b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i])
+ }
+ return b
+}
+
+// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAllowedCapabilities(values ...corev1.Capability) *SecurityContextConstraintsApplyConfiguration {
+ for i := range values {
+ b.AllowedCapabilities = append(b.AllowedCapabilities, values[i])
+ }
+ return b
+}
+
+// WithAllowHostDirVolumePlugin sets the AllowHostDirVolumePlugin field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllowHostDirVolumePlugin field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAllowHostDirVolumePlugin(value bool) *SecurityContextConstraintsApplyConfiguration {
+ b.AllowHostDirVolumePlugin = &value
+ return b
+}
+
+// WithVolumes adds the given value to the Volumes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Volumes field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithVolumes(values ...securityv1.FSType) *SecurityContextConstraintsApplyConfiguration {
+ for i := range values {
+ b.Volumes = append(b.Volumes, values[i])
+ }
+ return b
+}
+
+// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *SecurityContextConstraintsApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithAllowedFlexVolumes")
+ }
+ b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i])
+ }
+ return b
+}
+
+// WithAllowHostNetwork sets the AllowHostNetwork field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllowHostNetwork field is set to the value of the last call.
+func (b *SecurityContextConstraintsApplyConfiguration) WithAllowHostNetwork(value bool) *SecurityContextConstraintsApplyConfiguration { + b.AllowHostNetwork = &value + return b +} + +// WithAllowHostPorts sets the AllowHostPorts field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowHostPorts field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithAllowHostPorts(value bool) *SecurityContextConstraintsApplyConfiguration { + b.AllowHostPorts = &value + return b +} + +// WithAllowHostPID sets the AllowHostPID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowHostPID field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithAllowHostPID(value bool) *SecurityContextConstraintsApplyConfiguration { + b.AllowHostPID = &value + return b +} + +// WithAllowHostIPC sets the AllowHostIPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowHostIPC field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithAllowHostIPC(value bool) *SecurityContextConstraintsApplyConfiguration { + b.AllowHostIPC = &value + return b +} + +// WithUserNamespaceLevel sets the UserNamespaceLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserNamespaceLevel field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithUserNamespaceLevel(value securityv1.NamespaceLevelType) *SecurityContextConstraintsApplyConfiguration { + b.UserNamespaceLevel = &value + return b +} + +// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *SecurityContextConstraintsApplyConfiguration { + b.DefaultAllowPrivilegeEscalation = &value + return b +} + +// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *SecurityContextConstraintsApplyConfiguration { + b.AllowPrivilegeEscalation = &value + return b +} + +// WithSELinuxContext sets the SELinuxContext field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxContext field is set to the value of the last call. 
+func (b *SecurityContextConstraintsApplyConfiguration) WithSELinuxContext(value *SELinuxContextStrategyOptionsApplyConfiguration) *SecurityContextConstraintsApplyConfiguration { + b.SELinuxContext = value + return b +} + +// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RunAsUser field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *SecurityContextConstraintsApplyConfiguration { + b.RunAsUser = value + return b +} + +// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroups field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *SecurityContextConstraintsApplyConfiguration { + b.SupplementalGroups = value + return b +} + +// WithFSGroup sets the FSGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FSGroup field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *SecurityContextConstraintsApplyConfiguration { + b.FSGroup = value + return b +} + +// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. +func (b *SecurityContextConstraintsApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *SecurityContextConstraintsApplyConfiguration { + b.ReadOnlyRootFilesystem = &value + return b +} + +// WithUsers adds the given value to the Users field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Users field. +func (b *SecurityContextConstraintsApplyConfiguration) WithUsers(values ...string) *SecurityContextConstraintsApplyConfiguration { + for i := range values { + b.Users = append(b.Users, values[i]) + } + return b +} + +// WithGroups adds the given value to the Groups field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Groups field. +func (b *SecurityContextConstraintsApplyConfiguration) WithGroups(values ...string) *SecurityContextConstraintsApplyConfiguration { + for i := range values { + b.Groups = append(b.Groups, values[i]) + } + return b +} +
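Users and Groups append in the same way, which matters when several call sites contribute grants to one configuration. A short sketch using only the setters above; the user and group names are made up:

    // Each call appends: Users ends up with two entries, Groups with one.
    scc.WithUsers("system:serviceaccount:ns-a:builder").
    	WithUsers("system:serviceaccount:ns-b:builder").
    	WithGroups("system:authenticated")

+// WithSeccompProfiles adds the given value to the SeccompProfiles field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations.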
+// If called multiple times, values provided by each call will be appended to the SeccompProfiles field. +func (b *SecurityContextConstraintsApplyConfiguration) WithSeccompProfiles(values ...string) *SecurityContextConstraintsApplyConfiguration { + for i := range values { + b.SeccompProfiles = append(b.SeccompProfiles, values[i]) + } + return b +} + +// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. +func (b *SecurityContextConstraintsApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *SecurityContextConstraintsApplyConfiguration { + for i := range values { + b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) + } + return b +} + +// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. +func (b *SecurityContextConstraintsApplyConfiguration) WithForbiddenSysctls(values ...string) *SecurityContextConstraintsApplyConfiguration { + for i := range values { + b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *SecurityContextConstraintsApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/selinuxcontextstrategyoptions.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/selinuxcontextstrategyoptions.go new file mode 100644 index 0000000000000..985472fea422c --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/selinuxcontextstrategyoptions.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + securityv1 "github.com/openshift/api/security/v1" + corev1 "k8s.io/api/core/v1" +) + +// SELinuxContextStrategyOptionsApplyConfiguration represents a declarative configuration of the SELinuxContextStrategyOptions type for use +// with apply. +type SELinuxContextStrategyOptionsApplyConfiguration struct { + Type *securityv1.SELinuxContextStrategyType `json:"type,omitempty"` + SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty"` +} + +// SELinuxContextStrategyOptionsApplyConfiguration constructs a declarative configuration of the SELinuxContextStrategyOptions type for use with +// apply. +func SELinuxContextStrategyOptions() *SELinuxContextStrategyOptionsApplyConfiguration { + return &SELinuxContextStrategyOptionsApplyConfiguration{} +} +
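SELinuxContextStrategyOptions is one of the nested strategy builders consumed by the SCC builder's WithSELinuxContext. Note that SELinuxOptions here is the plain corev1 struct, not a further apply configuration. A sketch; the strategy value is written as an explicit string conversion so as not to assume a particular named constant, and the MCS label is illustrative:

    selinux := securityv1apply.SELinuxContextStrategyOptions().
    	WithType(securityv1.SELinuxContextStrategyType("MustRunAs")).
    	WithSELinuxOptions(corev1.SELinuxOptions{Level: "s0:c123,c456"})
    scc.WithSELinuxContext(selinux)

+// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call.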
+func (b *SELinuxContextStrategyOptionsApplyConfiguration) WithType(value securityv1.SELinuxContextStrategyType) *SELinuxContextStrategyOptionsApplyConfiguration { + b.Type = &value + return b +} + +// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxOptions field is set to the value of the last call. +func (b *SELinuxContextStrategyOptionsApplyConfiguration) WithSELinuxOptions(value corev1.SELinuxOptions) *SELinuxContextStrategyOptionsApplyConfiguration { + b.SELinuxOptions = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/supplementalgroupsstrategyoptions.go b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/supplementalgroupsstrategyoptions.go new file mode 100644 index 0000000000000..e1f1db6211e12 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/applyconfigurations/security/v1/supplementalgroupsstrategyoptions.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + securityv1 "github.com/openshift/api/security/v1" +) + +// SupplementalGroupsStrategyOptionsApplyConfiguration represents a declarative configuration of the SupplementalGroupsStrategyOptions type for use +// with apply. +type SupplementalGroupsStrategyOptionsApplyConfiguration struct { + Type *securityv1.SupplementalGroupsStrategyType `json:"type,omitempty"` + Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` +} + +// SupplementalGroupsStrategyOptionsApplyConfiguration constructs a declarative configuration of the SupplementalGroupsStrategyOptions type for use with +// apply. +func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { + return &SupplementalGroupsStrategyOptionsApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithType(value securityv1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { + b.Type = &value + return b +} + +// WithRanges adds the given value to the Ranges field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ranges field. +func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRanges") + } + b.Ranges = append(b.Ranges, *values[i]) + } + return b +}
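SupplementalGroupsStrategyOptions pairs a strategy type with a list of ID ranges; WithRanges appends and, like the other pointer-slice setters, panics on nil entries. A sketch that assumes a generated IDRange() constructor with WithMin/WithMax helpers for IDRangeApplyConfiguration (not shown in this hunk); the values are illustrative:

    groups := securityv1apply.SupplementalGroupsStrategyOptions().
    	WithType(securityv1.SupplementalGroupsStrategyType("MustRunAs")).
    	WithRanges(securityv1apply.IDRange().WithMin(1000).WithMax(1999))
    scc.WithSupplementalGroups(groups)

diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..62f63b51f9284 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT.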
+ +package versioned + +import ( + fmt "fmt" + http "net/http" + + securityv1 "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SecurityV1() securityv1.SecurityV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + securityV1 *securityv1.SecurityV1Client +} + +// SecurityV1 retrieves the SecurityV1Client +func (c *Clientset) SecurityV1() securityv1.SecurityV1Interface { + return c.securityV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.securityV1, err = securityv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.securityV1 = securityv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..8201b0f01517b --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + securityv1 "github.com/openshift/api/security/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + securityv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/doc.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1
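With the clientset and scheme in place, typical consumer code builds a rest.Config, constructs the versioned clientset, and reaches typed resources through SecurityV1(). A sketch that server-side-applies the SCC configuration assembled in the earlier sketches; the kubeconfig path and field manager name are made up, and clientcmd is k8s.io/client-go/tools/clientcmd:

    // Sketch: construct the clientset and apply an SCC built with the
    // apply configurations above. Error handling is abbreviated.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
    	panic(err)
    }
    cs, err := versioned.NewForConfig(cfg)
    if err != nil {
    	panic(err)
    }
    // FieldManager is required for server-side apply; Force lets this
    // manager take over fields currently owned by other managers.
    applied, err := cs.SecurityV1().SecurityContextConstraints().
    	Apply(context.TODO(), scc, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
    _ = applied

diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/generated_expansion.go new file mode 100644 index 0000000000000..db29417c88c73 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/generated_expansion.go @@ -0,0 +1,13 @@ +// Code generated by client-gen. DO NOT EDIT.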
+ +package v1 + +type PodSecurityPolicyReviewExpansion interface{} + +type PodSecurityPolicySelfSubjectReviewExpansion interface{} + +type PodSecurityPolicySubjectReviewExpansion interface{} + +type RangeAllocationExpansion interface{} + +type SecurityContextConstraintsExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyreview.go new file mode 100644 index 0000000000000..f92ff7f4f5fda --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyreview.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + securityv1 "github.com/openshift/api/security/v1" + scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// PodSecurityPolicyReviewsGetter has a method to return a PodSecurityPolicyReviewInterface. +// A group's client should implement this interface. +type PodSecurityPolicyReviewsGetter interface { + PodSecurityPolicyReviews(namespace string) PodSecurityPolicyReviewInterface +} + +// PodSecurityPolicyReviewInterface has methods to work with PodSecurityPolicyReview resources. +type PodSecurityPolicyReviewInterface interface { + Create(ctx context.Context, podSecurityPolicyReview *securityv1.PodSecurityPolicyReview, opts metav1.CreateOptions) (*securityv1.PodSecurityPolicyReview, error) + PodSecurityPolicyReviewExpansion +} + +// podSecurityPolicyReviews implements PodSecurityPolicyReviewInterface +type podSecurityPolicyReviews struct { + *gentype.Client[*securityv1.PodSecurityPolicyReview] +} + +// newPodSecurityPolicyReviews returns a PodSecurityPolicyReviews +func newPodSecurityPolicyReviews(c *SecurityV1Client, namespace string) *podSecurityPolicyReviews { + return &podSecurityPolicyReviews{ + gentype.NewClient[*securityv1.PodSecurityPolicyReview]( + "podsecuritypolicyreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *securityv1.PodSecurityPolicyReview { return &securityv1.PodSecurityPolicyReview{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go new file mode 100644 index 0000000000000..47afca0970897 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go @@ -0,0 +1,44 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + securityv1 "github.com/openshift/api/security/v1" + scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// PodSecurityPolicySelfSubjectReviewsGetter has a method to return a PodSecurityPolicySelfSubjectReviewInterface. +// A group's client should implement this interface. 
+type PodSecurityPolicySelfSubjectReviewsGetter interface { + PodSecurityPolicySelfSubjectReviews(namespace string) PodSecurityPolicySelfSubjectReviewInterface +} + +// PodSecurityPolicySelfSubjectReviewInterface has methods to work with PodSecurityPolicySelfSubjectReview resources. +type PodSecurityPolicySelfSubjectReviewInterface interface { + Create(ctx context.Context, podSecurityPolicySelfSubjectReview *securityv1.PodSecurityPolicySelfSubjectReview, opts metav1.CreateOptions) (*securityv1.PodSecurityPolicySelfSubjectReview, error) + PodSecurityPolicySelfSubjectReviewExpansion +} + +// podSecurityPolicySelfSubjectReviews implements PodSecurityPolicySelfSubjectReviewInterface +type podSecurityPolicySelfSubjectReviews struct { + *gentype.Client[*securityv1.PodSecurityPolicySelfSubjectReview] +} + +// newPodSecurityPolicySelfSubjectReviews returns a PodSecurityPolicySelfSubjectReviews +func newPodSecurityPolicySelfSubjectReviews(c *SecurityV1Client, namespace string) *podSecurityPolicySelfSubjectReviews { + return &podSecurityPolicySelfSubjectReviews{ + gentype.NewClient[*securityv1.PodSecurityPolicySelfSubjectReview]( + "podsecuritypolicyselfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *securityv1.PodSecurityPolicySelfSubjectReview { + return &securityv1.PodSecurityPolicySelfSubjectReview{} + }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicysubjectreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicysubjectreview.go new file mode 100644 index 0000000000000..ebbcd8cb6d2a0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicysubjectreview.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + securityv1 "github.com/openshift/api/security/v1" + scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// PodSecurityPolicySubjectReviewsGetter has a method to return a PodSecurityPolicySubjectReviewInterface. +// A group's client should implement this interface. +type PodSecurityPolicySubjectReviewsGetter interface { + PodSecurityPolicySubjectReviews(namespace string) PodSecurityPolicySubjectReviewInterface +} +
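The review resources are virtual, create-only APIs (their interfaces expose only Create): a caller submits a pod template and the server reports which SCCs would admit it. A sketch of a subject review request, assuming the usual spec fields (Template, User) from github.com/openshift/api/security/v1; the namespace, user, and template are illustrative:

    // Sketch: ask which SCCs would admit this pod template for a user.
    // podTemplate is a corev1.PodTemplateSpec prepared by the caller.
    review := &securityv1.PodSecurityPolicySubjectReview{
    	Spec: securityv1.PodSecurityPolicySubjectReviewSpec{
    		Template: podTemplate,
    		User:     "system:serviceaccount:demo:runner",
    	},
    }
    result, err := cs.SecurityV1().PodSecurityPolicySubjectReviews("demo").
    	Create(context.TODO(), review, metav1.CreateOptions{})

+// PodSecurityPolicySubjectReviewInterface has methods to work with PodSecurityPolicySubjectReview resources.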
+type PodSecurityPolicySubjectReviewInterface interface { + Create(ctx context.Context, podSecurityPolicySubjectReview *securityv1.PodSecurityPolicySubjectReview, opts metav1.CreateOptions) (*securityv1.PodSecurityPolicySubjectReview, error) + PodSecurityPolicySubjectReviewExpansion +} + +// podSecurityPolicySubjectReviews implements PodSecurityPolicySubjectReviewInterface +type podSecurityPolicySubjectReviews struct { + *gentype.Client[*securityv1.PodSecurityPolicySubjectReview] +} + +// newPodSecurityPolicySubjectReviews returns a PodSecurityPolicySubjectReviews +func newPodSecurityPolicySubjectReviews(c *SecurityV1Client, namespace string) *podSecurityPolicySubjectReviews { + return &podSecurityPolicySubjectReviews{ + gentype.NewClient[*securityv1.PodSecurityPolicySubjectReview]( + "podsecuritypolicysubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *securityv1.PodSecurityPolicySubjectReview { return &securityv1.PodSecurityPolicySubjectReview{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/rangeallocation.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/rangeallocation.go new file mode 100644 index 0000000000000..b4469ddff51b2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/rangeallocation.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + securityv1 "github.com/openshift/api/security/v1" + applyconfigurationssecurityv1 "github.com/openshift/client-go/security/applyconfigurations/security/v1" + scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RangeAllocationsGetter has a method to return a RangeAllocationInterface. +// A group's client should implement this interface. +type RangeAllocationsGetter interface { + RangeAllocations() RangeAllocationInterface +} + +// RangeAllocationInterface has methods to work with RangeAllocation resources. 
+type RangeAllocationInterface interface { + Create(ctx context.Context, rangeAllocation *securityv1.RangeAllocation, opts metav1.CreateOptions) (*securityv1.RangeAllocation, error) + Update(ctx context.Context, rangeAllocation *securityv1.RangeAllocation, opts metav1.UpdateOptions) (*securityv1.RangeAllocation, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*securityv1.RangeAllocation, error) + List(ctx context.Context, opts metav1.ListOptions) (*securityv1.RangeAllocationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *securityv1.RangeAllocation, err error) + Apply(ctx context.Context, rangeAllocation *applyconfigurationssecurityv1.RangeAllocationApplyConfiguration, opts metav1.ApplyOptions) (result *securityv1.RangeAllocation, err error) + RangeAllocationExpansion +} + +// rangeAllocations implements RangeAllocationInterface +type rangeAllocations struct { + *gentype.ClientWithListAndApply[*securityv1.RangeAllocation, *securityv1.RangeAllocationList, *applyconfigurationssecurityv1.RangeAllocationApplyConfiguration] +} + +// newRangeAllocations returns a RangeAllocations +func newRangeAllocations(c *SecurityV1Client) *rangeAllocations { + return &rangeAllocations{ + gentype.NewClientWithListAndApply[*securityv1.RangeAllocation, *securityv1.RangeAllocationList, *applyconfigurationssecurityv1.RangeAllocationApplyConfiguration]( + "rangeallocations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *securityv1.RangeAllocation { return &securityv1.RangeAllocation{} }, + func() *securityv1.RangeAllocationList { return &securityv1.RangeAllocationList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/security_client.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/security_client.go new file mode 100644 index 0000000000000..0fe95d0280761 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/security_client.go @@ -0,0 +1,111 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + securityv1 "github.com/openshift/api/security/v1" + scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type SecurityV1Interface interface { + RESTClient() rest.Interface + PodSecurityPolicyReviewsGetter + PodSecurityPolicySelfSubjectReviewsGetter + PodSecurityPolicySubjectReviewsGetter + RangeAllocationsGetter + SecurityContextConstraintsGetter +} + +// SecurityV1Client is used to interact with features provided by the security.openshift.io group. 
+type SecurityV1Client struct { + restClient rest.Interface +} + +func (c *SecurityV1Client) PodSecurityPolicyReviews(namespace string) PodSecurityPolicyReviewInterface { + return newPodSecurityPolicyReviews(c, namespace) +} + +func (c *SecurityV1Client) PodSecurityPolicySelfSubjectReviews(namespace string) PodSecurityPolicySelfSubjectReviewInterface { + return newPodSecurityPolicySelfSubjectReviews(c, namespace) +} + +func (c *SecurityV1Client) PodSecurityPolicySubjectReviews(namespace string) PodSecurityPolicySubjectReviewInterface { + return newPodSecurityPolicySubjectReviews(c, namespace) +} + +func (c *SecurityV1Client) RangeAllocations() RangeAllocationInterface { + return newRangeAllocations(c) +} + +func (c *SecurityV1Client) SecurityContextConstraints() SecurityContextConstraintsInterface { + return newSecurityContextConstraints(c) +} + +// NewForConfig creates a new SecurityV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*SecurityV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SecurityV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SecurityV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &SecurityV1Client{client}, nil +} + +// NewForConfigOrDie creates a new SecurityV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SecurityV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SecurityV1Client for the given RESTClient. +func New(c rest.Interface) *SecurityV1Client { + return &SecurityV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := securityv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SecurityV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/securitycontextconstraints.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/securitycontextconstraints.go new file mode 100644 index 0000000000000..82eda438d1295 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/securitycontextconstraints.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + securityv1 "github.com/openshift/api/security/v1" + applyconfigurationssecurityv1 "github.com/openshift/client-go/security/applyconfigurations/security/v1" + scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// SecurityContextConstraintsGetter has a method to return a SecurityContextConstraintsInterface. +// A group's client should implement this interface. +type SecurityContextConstraintsGetter interface { + SecurityContextConstraints() SecurityContextConstraintsInterface +} + +// SecurityContextConstraintsInterface has methods to work with SecurityContextConstraints resources. +type SecurityContextConstraintsInterface interface { + Create(ctx context.Context, securityContextConstraints *securityv1.SecurityContextConstraints, opts metav1.CreateOptions) (*securityv1.SecurityContextConstraints, error) + Update(ctx context.Context, securityContextConstraints *securityv1.SecurityContextConstraints, opts metav1.UpdateOptions) (*securityv1.SecurityContextConstraints, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*securityv1.SecurityContextConstraints, error) + List(ctx context.Context, opts metav1.ListOptions) (*securityv1.SecurityContextConstraintsList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *securityv1.SecurityContextConstraints, err error) + Apply(ctx context.Context, securityContextConstraints *applyconfigurationssecurityv1.SecurityContextConstraintsApplyConfiguration, opts metav1.ApplyOptions) (result *securityv1.SecurityContextConstraints, err error) + SecurityContextConstraintsExpansion +} + +// securityContextConstraints implements SecurityContextConstraintsInterface +type securityContextConstraints struct { + *gentype.ClientWithListAndApply[*securityv1.SecurityContextConstraints, *securityv1.SecurityContextConstraintsList, *applyconfigurationssecurityv1.SecurityContextConstraintsApplyConfiguration] +} + +// newSecurityContextConstraints returns a SecurityContextConstraints +func newSecurityContextConstraints(c *SecurityV1Client) *securityContextConstraints { + return &securityContextConstraints{ + gentype.NewClientWithListAndApply[*securityv1.SecurityContextConstraints, *securityv1.SecurityContextConstraintsList, *applyconfigurationssecurityv1.SecurityContextConstraintsApplyConfiguration]( + "securitycontextconstraints", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *securityv1.SecurityContextConstraints { return &securityv1.SecurityContextConstraints{} }, + func() *securityv1.SecurityContextConstraintsList { return &securityv1.SecurityContextConstraintsList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/factory.go new file mode 100644 index 0000000000000..1d5116f2feaca --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by 
informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/security/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/security/informers/externalversions/internalinterfaces" + security "github.com/openshift/client-go/security/informers/externalversions/security" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.Shutdown() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches have synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Security() security.Interface +} + +func (f *sharedInformerFactory) Security() security.Interface { + return security.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/generic.go new file mode 100644 index 0000000000000..729e6666f2b48 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + fmt "fmt" + + v1 "github.com/openshift/api/security/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} +
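The factory contract is easiest to see from a short consumer. A sketch wiring the security informer factory to the SecurityContextConstraints informer and lister, using only methods defined above; the resync period is illustrative, cs is a versioned clientset as constructed earlier, and labels is k8s.io/apimachinery/pkg/labels:

    factory := externalversions.NewSharedInformerFactory(cs, 10*time.Minute)
    sccInformer := factory.Security().V1().SecurityContextConstraints()
    _ = sccInformer.Informer() // registers the informer with the factory
    stop := make(chan struct{})
    defer close(stop)
    factory.Start(stop) // non-blocking; runs each registered informer in a goroutine
    defer factory.Shutdown()
    for t, ok := range factory.WaitForCacheSync(stop) {
    	if !ok {
    		panic(fmt.Sprintf("cache %v failed to sync", t))
    	}
    }
    sccs, _ := sccInformer.Lister().List(labels.Everything()) // read-only objects
    _ = sccs

+// Lister returns the GenericLister.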
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=security.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("rangeallocations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Security().V1().RangeAllocations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("securitycontextconstraints"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Security().V1().SecurityContextConstraints().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..6977ed09aaf81 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/security/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/security/interface.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/interface.go new file mode 100644 index 0000000000000..f83962861f658 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package security + +import ( + internalinterfaces "github.com/openshift/client-go/security/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/security/informers/externalversions/security/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/interface.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/interface.go new file mode 100644 index 0000000000000..a5e60e5cab7d6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/security/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RangeAllocations returns a RangeAllocationInformer. + RangeAllocations() RangeAllocationInformer + // SecurityContextConstraints returns a SecurityContextConstraintsInformer. + SecurityContextConstraints() SecurityContextConstraintsInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RangeAllocations returns a RangeAllocationInformer. +func (v *version) RangeAllocations() RangeAllocationInformer { + return &rangeAllocationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// SecurityContextConstraints returns a SecurityContextConstraintsInformer. +func (v *version) SecurityContextConstraints() SecurityContextConstraintsInformer { + return &securityContextConstraintsInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/rangeallocation.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/rangeallocation.go new file mode 100644 index 0000000000000..34adaf89ded58 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/rangeallocation.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apisecurityv1 "github.com/openshift/api/security/v1" + versioned "github.com/openshift/client-go/security/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/security/informers/externalversions/internalinterfaces" + securityv1 "github.com/openshift/client-go/security/listers/security/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RangeAllocationInformer provides access to a shared informer and lister for +// RangeAllocations. 
+type RangeAllocationInformer interface { + Informer() cache.SharedIndexInformer + Lister() securityv1.RangeAllocationLister +} + +type rangeAllocationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRangeAllocationInformer constructs a new informer for RangeAllocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRangeAllocationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRangeAllocationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRangeAllocationInformer constructs a new informer for RangeAllocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRangeAllocationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SecurityV1().RangeAllocations().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SecurityV1().RangeAllocations().Watch(context.TODO(), options) + }, + }, + &apisecurityv1.RangeAllocation{}, + resyncPeriod, + indexers, + ) +} + +func (f *rangeAllocationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRangeAllocationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *rangeAllocationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisecurityv1.RangeAllocation{}, f.defaultInformer) +} + +func (f *rangeAllocationInformer) Lister() securityv1.RangeAllocationLister { + return securityv1.NewRangeAllocationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/securitycontextconstraints.go b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/securitycontextconstraints.go new file mode 100644 index 0000000000000..5e7e6c98203e1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/informers/externalversions/security/v1/securitycontextconstraints.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + time "time" + + apisecurityv1 "github.com/openshift/api/security/v1" + versioned "github.com/openshift/client-go/security/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/security/informers/externalversions/internalinterfaces" + securityv1 "github.com/openshift/client-go/security/listers/security/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// SecurityContextConstraintsInformer provides access to a shared informer and lister for +// SecurityContextConstraints. +type SecurityContextConstraintsInformer interface { + Informer() cache.SharedIndexInformer + Lister() securityv1.SecurityContextConstraintsLister +} + +type securityContextConstraintsInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewSecurityContextConstraintsInformer constructs a new informer for SecurityContextConstraints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSecurityContextConstraintsInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSecurityContextConstraintsInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredSecurityContextConstraintsInformer constructs a new informer for SecurityContextConstraints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredSecurityContextConstraintsInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SecurityV1().SecurityContextConstraints().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SecurityV1().SecurityContextConstraints().Watch(context.TODO(), options) + }, + }, + &apisecurityv1.SecurityContextConstraints{}, + resyncPeriod, + indexers, + ) +} + +func (f *securityContextConstraintsInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSecurityContextConstraintsInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *securityContextConstraintsInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisecurityv1.SecurityContextConstraints{}, f.defaultInformer) +} + +func (f *securityContextConstraintsInformer) Lister() securityv1.SecurityContextConstraintsLister { + return securityv1.NewSecurityContextConstraintsLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/security/listers/security/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/security/listers/security/v1/expansion_generated.go new file mode 100644 index 0000000000000..fa088f154096d --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/listers/security/v1/expansion_generated.go @@ -0,0 +1,11 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// RangeAllocationListerExpansion allows custom methods to be added to +// RangeAllocationLister. +type RangeAllocationListerExpansion interface{} + +// SecurityContextConstraintsListerExpansion allows custom methods to be added to +// SecurityContextConstraintsLister. +type SecurityContextConstraintsListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/security/listers/security/v1/rangeallocation.go b/vendor/github.com/openshift/client-go/security/listers/security/v1/rangeallocation.go new file mode 100644 index 0000000000000..bb185f4a7f593 --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/listers/security/v1/rangeallocation.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + securityv1 "github.com/openshift/api/security/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// RangeAllocationLister helps list RangeAllocations. +// All objects returned here must be treated as read-only. +type RangeAllocationLister interface { + // List lists all RangeAllocations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*securityv1.RangeAllocation, err error) + // Get retrieves the RangeAllocation from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*securityv1.RangeAllocation, error) + RangeAllocationListerExpansion +} + +// rangeAllocationLister implements the RangeAllocationLister interface. 
+type rangeAllocationLister struct { + listers.ResourceIndexer[*securityv1.RangeAllocation] +} + +// NewRangeAllocationLister returns a new RangeAllocationLister. +func NewRangeAllocationLister(indexer cache.Indexer) RangeAllocationLister { + return &rangeAllocationLister{listers.New[*securityv1.RangeAllocation](indexer, securityv1.Resource("rangeallocation"))} +} diff --git a/vendor/github.com/openshift/client-go/security/listers/security/v1/securitycontextconstraints.go b/vendor/github.com/openshift/client-go/security/listers/security/v1/securitycontextconstraints.go new file mode 100644 index 0000000000000..f27825633daae --- /dev/null +++ b/vendor/github.com/openshift/client-go/security/listers/security/v1/securitycontextconstraints.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + securityv1 "github.com/openshift/api/security/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// SecurityContextConstraintsLister helps list SecurityContextConstraints. +// All objects returned here must be treated as read-only. +type SecurityContextConstraintsLister interface { + // List lists all SecurityContextConstraints in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*securityv1.SecurityContextConstraints, err error) + // Get retrieves the SecurityContextConstraints from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*securityv1.SecurityContextConstraints, error) + SecurityContextConstraintsListerExpansion +} + +// securityContextConstraintsLister implements the SecurityContextConstraintsLister interface. +type securityContextConstraintsLister struct { + listers.ResourceIndexer[*securityv1.SecurityContextConstraints] +} + +// NewSecurityContextConstraintsLister returns a new SecurityContextConstraintsLister. +func NewSecurityContextConstraintsLister(indexer cache.Indexer) SecurityContextConstraintsLister { + return &securityContextConstraintsLister{listers.New[*securityv1.SecurityContextConstraints](indexer, securityv1.Resource("securitycontextconstraints"))} +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..11da09f323ad8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/internal/internal.go @@ -0,0 +1,411 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
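[Editor's note: the informer and lister packages above are normally consumed through the generated shared informer factory under .../security/informers/externalversions (not shown in this diff). A hedged sketch of the usual wiring, assuming that factory's NewSharedInformerFactory constructor and Security().V1() accessors; note that the listers are built on the generic listers.ResourceIndexer, so everything they return is read-only.]

package informerexample

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"

	versioned "github.com/openshift/client-go/security/clientset/versioned"
	securityinformers "github.com/openshift/client-go/security/informers/externalversions"
)

// listSCCs starts the shared factory, waits for the SecurityContextConstraints
// cache to sync, then reads from the lister instead of hitting the API server.
func listSCCs(client versioned.Interface, stopCh <-chan struct{}) error {
	factory := securityinformers.NewSharedInformerFactory(client, 10*time.Minute)
	// Requesting Informer() before Start registers it with the factory.
	informer := factory.Security().V1().SecurityContextConstraints().Informer()
	lister := factory.Security().V1().SecurityContextConstraints().Lister()

	factory.Start(stopCh) // each registered informer runs in its own goroutine
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		return fmt.Errorf("SCC cache never synced")
	}

	sccs, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	for _, scc := range sccs {
		fmt.Println(scc.Name) // lister objects must be treated as read-only
	}
	return nil
}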
+ +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.template.v1.BrokerTemplateInstance + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.template.v1.BrokerTemplateInstanceSpec + default: {} +- name: com.github.openshift.api.template.v1.BrokerTemplateInstanceSpec + map: + fields: + - name: bindingIDs + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: secret + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: templateInstance + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} +- name: com.github.openshift.api.template.v1.Parameter + map: + fields: + - name: description + type: + scalar: string + - name: displayName + type: + scalar: string + - name: from + type: + scalar: string + - name: generate + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: required + type: + scalar: boolean + - name: value + type: + scalar: string +- name: com.github.openshift.api.template.v1.Template + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: labels + type: + map: + elementType: + scalar: string + - name: message + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: objects + type: + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + - name: parameters + type: + list: + elementType: + namedType: com.github.openshift.api.template.v1.Parameter + elementRelationship: atomic +- name: com.github.openshift.api.template.v1.TemplateInstance + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.template.v1.TemplateInstanceSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.template.v1.TemplateInstanceStatus + default: {} +- name: com.github.openshift.api.template.v1.TemplateInstanceCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + default: "" + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.template.v1.TemplateInstanceObject + map: + fields: + - name: ref + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} +- name: com.github.openshift.api.template.v1.TemplateInstanceRequester + map: + fields: + - name: extra + type: + map: + elementType: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: groups + type: + list: + elementType: + scalar: string + 
elementRelationship: atomic + - name: uid + type: + scalar: string + - name: username + type: + scalar: string +- name: com.github.openshift.api.template.v1.TemplateInstanceSpec + map: + fields: + - name: requester + type: + namedType: com.github.openshift.api.template.v1.TemplateInstanceRequester + - name: secret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: template + type: + namedType: com.github.openshift.api.template.v1.Template + default: {} +- name: com.github.openshift.api.template.v1.TemplateInstanceStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.template.v1.TemplateInstanceCondition + elementRelationship: atomic + - name: objects + type: + list: + elementType: + namedType: com.github.openshift.api.template.v1.TemplateInstanceObject + elementRelationship: atomic +- name: io.k8s.api.core.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - 
name: controller
+      type:
+        scalar: boolean
+    - name: kind
+      type:
+        scalar: string
+      default: ""
+    - name: name
+      type:
+        scalar: string
+      default: ""
+    - name: uid
+      type:
+        scalar: string
+      default: ""
+  elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+  scalar: untyped
+- name: io.k8s.apimachinery.pkg.runtime.RawExtension
+  map:
+    elementType:
+      scalar: untyped
+      list:
+        elementType:
+          namedType: __untyped_atomic_
+        elementRelationship: atomic
+      map:
+        elementType:
+          namedType: __untyped_deduced_
+        elementRelationship: separable
+- name: __untyped_atomic_
+  scalar: untyped
+  list:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+  map:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+- name: __untyped_deduced_
+  scalar: untyped
+  list:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+  map:
+    elementType:
+      namedType: __untyped_deduced_
+    elementRelationship: separable
+`)
diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/brokertemplateinstance.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/brokertemplateinstance.go
new file mode 100644
index 0000000000000..837c2831c4d9b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/brokertemplateinstance.go
@@ -0,0 +1,237 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	templatev1 "github.com/openshift/api/template/v1"
+	internal "github.com/openshift/client-go/template/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// BrokerTemplateInstanceApplyConfiguration represents a declarative configuration of the BrokerTemplateInstance type for use
+// with apply.
+type BrokerTemplateInstanceApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *BrokerTemplateInstanceSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// BrokerTemplateInstance constructs a declarative configuration of the BrokerTemplateInstance type for use with
+// apply.
+func BrokerTemplateInstance(name string) *BrokerTemplateInstanceApplyConfiguration {
+	b := &BrokerTemplateInstanceApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("BrokerTemplateInstance")
+	b.WithAPIVersion("template.openshift.io/v1")
+	return b
+}
+
+// ExtractBrokerTemplateInstance extracts the applied configuration owned by fieldManager from
+// brokerTemplateInstance. If no managedFields are found in brokerTemplateInstance for fieldManager, a
+// BrokerTemplateInstanceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// brokerTemplateInstance must be an unmodified BrokerTemplateInstance API object that was retrieved from the Kubernetes API.
+// ExtractBrokerTemplateInstance provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractBrokerTemplateInstance(brokerTemplateInstance *templatev1.BrokerTemplateInstance, fieldManager string) (*BrokerTemplateInstanceApplyConfiguration, error) { + return extractBrokerTemplateInstance(brokerTemplateInstance, fieldManager, "") +} + +// ExtractBrokerTemplateInstanceStatus is the same as ExtractBrokerTemplateInstance except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractBrokerTemplateInstanceStatus(brokerTemplateInstance *templatev1.BrokerTemplateInstance, fieldManager string) (*BrokerTemplateInstanceApplyConfiguration, error) { + return extractBrokerTemplateInstance(brokerTemplateInstance, fieldManager, "status") +} + +func extractBrokerTemplateInstance(brokerTemplateInstance *templatev1.BrokerTemplateInstance, fieldManager string, subresource string) (*BrokerTemplateInstanceApplyConfiguration, error) { + b := &BrokerTemplateInstanceApplyConfiguration{} + err := managedfields.ExtractInto(brokerTemplateInstance, internal.Parser().Type("com.github.openshift.api.template.v1.BrokerTemplateInstance"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(brokerTemplateInstance.Name) + + b.WithKind("BrokerTemplateInstance") + b.WithAPIVersion("template.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithKind(value string) *BrokerTemplateInstanceApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithAPIVersion(value string) *BrokerTemplateInstanceApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithName(value string) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *BrokerTemplateInstanceApplyConfiguration) WithGenerateName(value string) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithNamespace(value string) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithUID(value types.UID) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithResourceVersion(value string) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithGeneration(value int64) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *BrokerTemplateInstanceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *BrokerTemplateInstanceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *BrokerTemplateInstanceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BrokerTemplateInstanceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *BrokerTemplateInstanceApplyConfiguration) WithLabels(entries map[string]string) *BrokerTemplateInstanceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *BrokerTemplateInstanceApplyConfiguration) WithAnnotations(entries map[string]string) *BrokerTemplateInstanceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *BrokerTemplateInstanceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *BrokerTemplateInstanceApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *BrokerTemplateInstanceApplyConfiguration) WithFinalizers(values ...string) *BrokerTemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *BrokerTemplateInstanceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *BrokerTemplateInstanceApplyConfiguration) WithSpec(value *BrokerTemplateInstanceSpecApplyConfiguration) *BrokerTemplateInstanceApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *BrokerTemplateInstanceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/brokertemplateinstancespec.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/brokertemplateinstancespec.go new file mode 100644 index 0000000000000..e65b74928c5ba --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/brokertemplateinstancespec.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// BrokerTemplateInstanceSpecApplyConfiguration represents a declarative configuration of the BrokerTemplateInstanceSpec type for use +// with apply. +type BrokerTemplateInstanceSpecApplyConfiguration struct { + TemplateInstance *corev1.ObjectReference `json:"templateInstance,omitempty"` + Secret *corev1.ObjectReference `json:"secret,omitempty"` + BindingIDs []string `json:"bindingIDs,omitempty"` +} + +// BrokerTemplateInstanceSpecApplyConfiguration constructs a declarative configuration of the BrokerTemplateInstanceSpec type for use with +// apply. +func BrokerTemplateInstanceSpec() *BrokerTemplateInstanceSpecApplyConfiguration { + return &BrokerTemplateInstanceSpecApplyConfiguration{} +} + +// WithTemplateInstance sets the TemplateInstance field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TemplateInstance field is set to the value of the last call. +func (b *BrokerTemplateInstanceSpecApplyConfiguration) WithTemplateInstance(value corev1.ObjectReference) *BrokerTemplateInstanceSpecApplyConfiguration { + b.TemplateInstance = &value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. 
+func (b *BrokerTemplateInstanceSpecApplyConfiguration) WithSecret(value corev1.ObjectReference) *BrokerTemplateInstanceSpecApplyConfiguration {
+	b.Secret = &value
+	return b
+}
+
+// WithBindingIDs adds the given value to the BindingIDs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the BindingIDs field.
+func (b *BrokerTemplateInstanceSpecApplyConfiguration) WithBindingIDs(values ...string) *BrokerTemplateInstanceSpecApplyConfiguration {
+	for i := range values {
+		b.BindingIDs = append(b.BindingIDs, values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/parameter.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/parameter.go
new file mode 100644
index 0000000000000..963d343e9e24c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/parameter.go
@@ -0,0 +1,77 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ParameterApplyConfiguration represents a declarative configuration of the Parameter type for use
+// with apply.
+type ParameterApplyConfiguration struct {
+	Name        *string `json:"name,omitempty"`
+	DisplayName *string `json:"displayName,omitempty"`
+	Description *string `json:"description,omitempty"`
+	Value       *string `json:"value,omitempty"`
+	Generate    *string `json:"generate,omitempty"`
+	From        *string `json:"from,omitempty"`
+	Required    *bool   `json:"required,omitempty"`
+}
+
+// ParameterApplyConfiguration constructs a declarative configuration of the Parameter type for use with
+// apply.
+func Parameter() *ParameterApplyConfiguration {
+	return &ParameterApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ParameterApplyConfiguration) WithName(value string) *ParameterApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithDisplayName sets the DisplayName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DisplayName field is set to the value of the last call.
+func (b *ParameterApplyConfiguration) WithDisplayName(value string) *ParameterApplyConfiguration {
+	b.DisplayName = &value
+	return b
+}
+
+// WithDescription sets the Description field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Description field is set to the value of the last call.
+func (b *ParameterApplyConfiguration) WithDescription(value string) *ParameterApplyConfiguration {
+	b.Description = &value
+	return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
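[Editor's note: the builders above are meant for fluent construction followed by server-side apply. A sketch under stated assumptions: the generated template clientset vendored elsewhere in this change exposes an Apply method on BrokerTemplateInstances, and every name and reference below is illustrative.]

package applyexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	templateapplyv1 "github.com/openshift/client-go/template/applyconfigurations/template/v1"
	templateclient "github.com/openshift/client-go/template/clientset/versioned"
)

// applyBTI declares the desired state of a cluster-scoped BrokerTemplateInstance;
// the server merges it, and only fields set on the builder are owned by our manager.
func applyBTI(ctx context.Context, client templateclient.Interface) error {
	cfg := templateapplyv1.BrokerTemplateInstance("my-bti"). // illustrative name
		WithSpec(templateapplyv1.BrokerTemplateInstanceSpec().
			WithTemplateInstance(corev1.ObjectReference{Kind: "TemplateInstance", Namespace: "demo", Name: "my-ti"}).
			WithSecret(corev1.ObjectReference{Kind: "Secret", Namespace: "demo", Name: "my-secret"}).
			WithBindingIDs("binding-1"))

	_, err := client.TemplateV1().BrokerTemplateInstances().
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}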
+func (b *ParameterApplyConfiguration) WithValue(value string) *ParameterApplyConfiguration { + b.Value = &value + return b +} + +// WithGenerate sets the Generate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generate field is set to the value of the last call. +func (b *ParameterApplyConfiguration) WithGenerate(value string) *ParameterApplyConfiguration { + b.Generate = &value + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *ParameterApplyConfiguration) WithFrom(value string) *ParameterApplyConfiguration { + b.From = &value + return b +} + +// WithRequired sets the Required field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Required field is set to the value of the last call. +func (b *ParameterApplyConfiguration) WithRequired(value bool) *ParameterApplyConfiguration { + b.Required = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/template.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/template.go new file mode 100644 index 0000000000000..8ae1ba0d56a4e --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/template.go @@ -0,0 +1,280 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + templatev1 "github.com/openshift/api/template/v1" + internal "github.com/openshift/client-go/template/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// TemplateApplyConfiguration represents a declarative configuration of the Template type for use +// with apply. +type TemplateApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Message *string `json:"message,omitempty"` + Objects []runtime.RawExtension `json:"objects,omitempty"` + Parameters []ParameterApplyConfiguration `json:"parameters,omitempty"` + ObjectLabels map[string]string `json:"labels,omitempty"` +} + +// Template constructs a declarative configuration of the Template type for use with +// apply. +func Template(name, namespace string) *TemplateApplyConfiguration { + b := &TemplateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Template") + b.WithAPIVersion("template.openshift.io/v1") + return b +} + +// ExtractTemplate extracts the applied configuration owned by fieldManager from +// template. If no managedFields are found in template for fieldManager, a +// TemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// template must be an unmodified Template API object that was retrieved from the Kubernetes API.
+// ExtractTemplate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractTemplate(template *templatev1.Template, fieldManager string) (*TemplateApplyConfiguration, error) {
+	return extractTemplate(template, fieldManager, "")
+}
+
+// ExtractTemplateStatus is the same as ExtractTemplate except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractTemplateStatus(template *templatev1.Template, fieldManager string) (*TemplateApplyConfiguration, error) {
+	return extractTemplate(template, fieldManager, "status")
+}
+
+func extractTemplate(template *templatev1.Template, fieldManager string, subresource string) (*TemplateApplyConfiguration, error) {
+	b := &TemplateApplyConfiguration{}
+	err := managedfields.ExtractInto(template, internal.Parser().Type("com.github.openshift.api.template.v1.Template"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(template.Name)
+	b.WithNamespace(template.Namespace)
+
+	b.WithKind("Template")
+	b.WithAPIVersion("template.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *TemplateApplyConfiguration) WithKind(value string) *TemplateApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *TemplateApplyConfiguration) WithAPIVersion(value string) *TemplateApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *TemplateApplyConfiguration) WithName(value string) *TemplateApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Name = &value
+	return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *TemplateApplyConfiguration) WithGenerateName(value string) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithNamespace(value string) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithUID(value types.UID) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithResourceVersion(value string) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithGeneration(value int64) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *TemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *TemplateApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *TemplateApplyConfiguration) WithLabels(entries map[string]string) *TemplateApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *TemplateApplyConfiguration) WithAnnotations(entries map[string]string) *TemplateApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *TemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *TemplateApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+func (b *TemplateApplyConfiguration) WithFinalizers(values ...string) *TemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *TemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *TemplateApplyConfiguration) WithMessage(value string) *TemplateApplyConfiguration { + b.Message = &value + return b +} + +// WithObjects adds the given value to the Objects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Objects field. +func (b *TemplateApplyConfiguration) WithObjects(values ...runtime.RawExtension) *TemplateApplyConfiguration { + for i := range values { + b.Objects = append(b.Objects, values[i]) + } + return b +} + +// WithParameters adds the given value to the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Parameters field. +func (b *TemplateApplyConfiguration) WithParameters(values ...*ParameterApplyConfiguration) *TemplateApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithParameters") + } + b.Parameters = append(b.Parameters, *values[i]) + } + return b +} + +// WithObjectLabels puts the entries into the ObjectLabels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ObjectLabels field, +// overwriting an existing map entries in ObjectLabels field with the same key. +func (b *TemplateApplyConfiguration) WithObjectLabels(entries map[string]string) *TemplateApplyConfiguration { + if b.ObjectLabels == nil && len(entries) > 0 { + b.ObjectLabels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectLabels[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *TemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstance.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstance.go new file mode 100644 index 0000000000000..caea8e334c187 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstance.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+
+package v1
+
+import (
+	templatev1 "github.com/openshift/api/template/v1"
+	internal "github.com/openshift/client-go/template/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// TemplateInstanceApplyConfiguration represents a declarative configuration of the TemplateInstance type for use
+// with apply.
+type TemplateInstanceApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *TemplateInstanceSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *TemplateInstanceStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// TemplateInstance constructs a declarative configuration of the TemplateInstance type for use with
+// apply.
+func TemplateInstance(name, namespace string) *TemplateInstanceApplyConfiguration {
+	b := &TemplateInstanceApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("TemplateInstance")
+	b.WithAPIVersion("template.openshift.io/v1")
+	return b
+}
+
+// ExtractTemplateInstance extracts the applied configuration owned by fieldManager from
+// templateInstance. If no managedFields are found in templateInstance for fieldManager, a
+// TemplateInstanceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// templateInstance must be an unmodified TemplateInstance API object that was retrieved from the Kubernetes API.
+// ExtractTemplateInstance provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractTemplateInstance(templateInstance *templatev1.TemplateInstance, fieldManager string) (*TemplateInstanceApplyConfiguration, error) {
+	return extractTemplateInstance(templateInstance, fieldManager, "")
+}
+
+// ExtractTemplateInstanceStatus is the same as ExtractTemplateInstance except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractTemplateInstanceStatus(templateInstance *templatev1.TemplateInstance, fieldManager string) (*TemplateInstanceApplyConfiguration, error) { + return extractTemplateInstance(templateInstance, fieldManager, "status") +} + +func extractTemplateInstance(templateInstance *templatev1.TemplateInstance, fieldManager string, subresource string) (*TemplateInstanceApplyConfiguration, error) { + b := &TemplateInstanceApplyConfiguration{} + err := managedfields.ExtractInto(templateInstance, internal.Parser().Type("com.github.openshift.api.template.v1.TemplateInstance"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(templateInstance.Name) + b.WithNamespace(templateInstance.Namespace) + + b.WithKind("TemplateInstance") + b.WithAPIVersion("template.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *TemplateInstanceApplyConfiguration) WithKind(value string) *TemplateInstanceApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *TemplateInstanceApplyConfiguration) WithAPIVersion(value string) *TemplateInstanceApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *TemplateInstanceApplyConfiguration) WithName(value string) *TemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *TemplateInstanceApplyConfiguration) WithGenerateName(value string) *TemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *TemplateInstanceApplyConfiguration) WithNamespace(value string) *TemplateInstanceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *TemplateInstanceApplyConfiguration) WithUID(value types.UID) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *TemplateInstanceApplyConfiguration) WithResourceVersion(value string) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *TemplateInstanceApplyConfiguration) WithGeneration(value int64) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *TemplateInstanceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *TemplateInstanceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *TemplateInstanceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
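+//
+// For example (an illustrative sketch; b is a builder obtained elsewhere):
+//
+//	b.WithLabels(map[string]string{"app": "demo"}).
+//		WithLabels(map[string]string{"app": "demo-v2", "tier": "web"})
+//	// Labels now contains {"app": "demo-v2", "tier": "web"}.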
+func (b *TemplateInstanceApplyConfiguration) WithLabels(entries map[string]string) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *TemplateInstanceApplyConfiguration) WithAnnotations(entries map[string]string) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *TemplateInstanceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *TemplateInstanceApplyConfiguration) WithFinalizers(values ...string) *TemplateInstanceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *TemplateInstanceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *TemplateInstanceApplyConfiguration) WithSpec(value *TemplateInstanceSpecApplyConfiguration) *TemplateInstanceApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
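+//
+// A typical construction chain combining these setters might look like the
+// following (an illustrative sketch; all values are placeholders and corev1
+// refers to k8s.io/api/core/v1):
+//
+//	ac := TemplateInstance("demo", "default").
+//		WithSpec(TemplateInstanceSpec().
+//			WithSecret(corev1.LocalObjectReference{Name: "template-params"})).
+//		WithStatus(TemplateInstanceStatus())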
+func (b *TemplateInstanceApplyConfiguration) WithStatus(value *TemplateInstanceStatusApplyConfiguration) *TemplateInstanceApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *TemplateInstanceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancecondition.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancecondition.go new file mode 100644 index 0000000000000..564bc8023c20f --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancecondition.go @@ -0,0 +1,65 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + templatev1 "github.com/openshift/api/template/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TemplateInstanceConditionApplyConfiguration represents a declarative configuration of the TemplateInstanceCondition type for use +// with apply. +type TemplateInstanceConditionApplyConfiguration struct { + Type *templatev1.TemplateInstanceConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// TemplateInstanceConditionApplyConfiguration constructs a declarative configuration of the TemplateInstanceCondition type for use with +// apply. +func TemplateInstanceCondition() *TemplateInstanceConditionApplyConfiguration { + return &TemplateInstanceConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TemplateInstanceConditionApplyConfiguration) WithType(value templatev1.TemplateInstanceConditionType) *TemplateInstanceConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *TemplateInstanceConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *TemplateInstanceConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *TemplateInstanceConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *TemplateInstanceConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Reason field is set to the value of the last call. +func (b *TemplateInstanceConditionApplyConfiguration) WithReason(value string) *TemplateInstanceConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *TemplateInstanceConditionApplyConfiguration) WithMessage(value string) *TemplateInstanceConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstanceobject.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstanceobject.go new file mode 100644 index 0000000000000..57fd153169642 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstanceobject.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// TemplateInstanceObjectApplyConfiguration represents a declarative configuration of the TemplateInstanceObject type for use +// with apply. +type TemplateInstanceObjectApplyConfiguration struct { + Ref *corev1.ObjectReference `json:"ref,omitempty"` +} + +// TemplateInstanceObjectApplyConfiguration constructs a declarative configuration of the TemplateInstanceObject type for use with +// apply. +func TemplateInstanceObject() *TemplateInstanceObjectApplyConfiguration { + return &TemplateInstanceObjectApplyConfiguration{} +} + +// WithRef sets the Ref field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ref field is set to the value of the last call. +func (b *TemplateInstanceObjectApplyConfiguration) WithRef(value corev1.ObjectReference) *TemplateInstanceObjectApplyConfiguration { + b.Ref = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancerequester.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancerequester.go new file mode 100644 index 0000000000000..66bd8eb1cb1ba --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancerequester.go @@ -0,0 +1,62 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + templatev1 "github.com/openshift/api/template/v1" +) + +// TemplateInstanceRequesterApplyConfiguration represents a declarative configuration of the TemplateInstanceRequester type for use +// with apply. +type TemplateInstanceRequesterApplyConfiguration struct { + Username *string `json:"username,omitempty"` + UID *string `json:"uid,omitempty"` + Groups []string `json:"groups,omitempty"` + Extra map[string]templatev1.ExtraValue `json:"extra,omitempty"` +} + +// TemplateInstanceRequesterApplyConfiguration constructs a declarative configuration of the TemplateInstanceRequester type for use with +// apply. 
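+//
+// Example (an illustrative sketch; all values are placeholders):
+//
+//	req := TemplateInstanceRequester().
+//		WithUsername("alice").
+//		WithGroups("system:authenticated").
+//		WithExtra(map[string]templatev1.ExtraValue{"scopes": {"user:full"}})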
+func TemplateInstanceRequester() *TemplateInstanceRequesterApplyConfiguration {
+ return &TemplateInstanceRequesterApplyConfiguration{}
+}
+
+// WithUsername sets the Username field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Username field is set to the value of the last call.
+func (b *TemplateInstanceRequesterApplyConfiguration) WithUsername(value string) *TemplateInstanceRequesterApplyConfiguration {
+ b.Username = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *TemplateInstanceRequesterApplyConfiguration) WithUID(value string) *TemplateInstanceRequesterApplyConfiguration {
+ b.UID = &value
+ return b
+}
+
+// WithGroups adds the given value to the Groups field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Groups field.
+func (b *TemplateInstanceRequesterApplyConfiguration) WithGroups(values ...string) *TemplateInstanceRequesterApplyConfiguration {
+ for i := range values {
+ b.Groups = append(b.Groups, values[i])
+ }
+ return b
+}
+
+// WithExtra puts the entries into the Extra field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Extra field,
+// overwriting existing map entries in the Extra field with the same key.
+func (b *TemplateInstanceRequesterApplyConfiguration) WithExtra(entries map[string]templatev1.ExtraValue) *TemplateInstanceRequesterApplyConfiguration {
+ if b.Extra == nil && len(entries) > 0 {
+ b.Extra = make(map[string]templatev1.ExtraValue, len(entries))
+ }
+ for k, v := range entries {
+ b.Extra[k] = v
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancespec.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancespec.go
new file mode 100644
index 0000000000000..24ef4a2fed36a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancespec.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+// TemplateInstanceSpecApplyConfiguration represents a declarative configuration of the TemplateInstanceSpec type for use
+// with apply.
+type TemplateInstanceSpecApplyConfiguration struct {
+ Template *TemplateApplyConfiguration `json:"template,omitempty"`
+ Secret *corev1.LocalObjectReference `json:"secret,omitempty"`
+ Requester *TemplateInstanceRequesterApplyConfiguration `json:"requester,omitempty"`
+}
+
+// TemplateInstanceSpecApplyConfiguration constructs a declarative configuration of the TemplateInstanceSpec type for use with
+// apply.
+func TemplateInstanceSpec() *TemplateInstanceSpecApplyConfiguration {
+ return &TemplateInstanceSpecApplyConfiguration{}
+}
+
+// WithTemplate sets the Template field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Template field is set to the value of the last call.
+func (b *TemplateInstanceSpecApplyConfiguration) WithTemplate(value *TemplateApplyConfiguration) *TemplateInstanceSpecApplyConfiguration {
+ b.Template = value
+ return b
+}
+
+// WithSecret sets the Secret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Secret field is set to the value of the last call.
+func (b *TemplateInstanceSpecApplyConfiguration) WithSecret(value corev1.LocalObjectReference) *TemplateInstanceSpecApplyConfiguration {
+ b.Secret = &value
+ return b
+}
+
+// WithRequester sets the Requester field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Requester field is set to the value of the last call.
+func (b *TemplateInstanceSpecApplyConfiguration) WithRequester(value *TemplateInstanceRequesterApplyConfiguration) *TemplateInstanceSpecApplyConfiguration {
+ b.Requester = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancestatus.go b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancestatus.go
new file mode 100644
index 0000000000000..f359612c971f0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/template/applyconfigurations/template/v1/templateinstancestatus.go
@@ -0,0 +1,42 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TemplateInstanceStatusApplyConfiguration represents a declarative configuration of the TemplateInstanceStatus type for use
+// with apply.
+type TemplateInstanceStatusApplyConfiguration struct {
+ Conditions []TemplateInstanceConditionApplyConfiguration `json:"conditions,omitempty"`
+ Objects []TemplateInstanceObjectApplyConfiguration `json:"objects,omitempty"`
+}
+
+// TemplateInstanceStatusApplyConfiguration constructs a declarative configuration of the TemplateInstanceStatus type for use with
+// apply.
+func TemplateInstanceStatus() *TemplateInstanceStatusApplyConfiguration {
+ return &TemplateInstanceStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *TemplateInstanceStatusApplyConfiguration) WithConditions(values ...*TemplateInstanceConditionApplyConfiguration) *TemplateInstanceStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithObjects adds the given value to the Objects field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Objects field. +func (b *TemplateInstanceStatusApplyConfiguration) WithObjects(values ...*TemplateInstanceObjectApplyConfiguration) *TemplateInstanceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithObjects") + } + b.Objects = append(b.Objects, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..ca7dc6db607f1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + templatev1 "github.com/openshift/client-go/template/clientset/versioned/typed/template/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + TemplateV1() templatev1.TemplateV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + templateV1 *templatev1.TemplateV1Client +} + +// TemplateV1 retrieves the TemplateV1Client +func (c *Clientset) TemplateV1() templatev1.TemplateV1Interface { + return c.templateV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
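+//
+// Most callers construct the clientset through NewForConfig. A sketch
+// (illustrative only; kubeconfigPath and ctx are placeholders, clientcmd is
+// k8s.io/client-go/tools/clientcmd, metav1 is k8s.io/apimachinery/pkg/apis/meta/v1,
+// and error handling is elided):
+//
+//	cfg, _ := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+//	cs, _ := NewForConfig(cfg)
+//	list, _ := cs.TemplateV1().Templates("openshift").List(ctx, metav1.ListOptions{})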
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.templateV1, err = templatev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.templateV1 = templatev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..5bce0ae5c50c8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + templatev1 "github.com/openshift/api/template/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + templatev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/brokertemplateinstance.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/brokertemplateinstance.go new file mode 100644 index 0000000000000..e25f0c0ded132 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/brokertemplateinstance.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + templatev1 "github.com/openshift/api/template/v1" + applyconfigurationstemplatev1 "github.com/openshift/client-go/template/applyconfigurations/template/v1" + scheme "github.com/openshift/client-go/template/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// BrokerTemplateInstancesGetter has a method to return a BrokerTemplateInstanceInterface. +// A group's client should implement this interface. +type BrokerTemplateInstancesGetter interface { + BrokerTemplateInstances() BrokerTemplateInstanceInterface +} + +// BrokerTemplateInstanceInterface has methods to work with BrokerTemplateInstance resources. +type BrokerTemplateInstanceInterface interface { + Create(ctx context.Context, brokerTemplateInstance *templatev1.BrokerTemplateInstance, opts metav1.CreateOptions) (*templatev1.BrokerTemplateInstance, error) + Update(ctx context.Context, brokerTemplateInstance *templatev1.BrokerTemplateInstance, opts metav1.UpdateOptions) (*templatev1.BrokerTemplateInstance, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*templatev1.BrokerTemplateInstance, error) + List(ctx context.Context, opts metav1.ListOptions) (*templatev1.BrokerTemplateInstanceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *templatev1.BrokerTemplateInstance, err error) + Apply(ctx context.Context, brokerTemplateInstance *applyconfigurationstemplatev1.BrokerTemplateInstanceApplyConfiguration, opts metav1.ApplyOptions) (result *templatev1.BrokerTemplateInstance, err error) + BrokerTemplateInstanceExpansion +} + +// brokerTemplateInstances implements BrokerTemplateInstanceInterface +type brokerTemplateInstances struct { + *gentype.ClientWithListAndApply[*templatev1.BrokerTemplateInstance, *templatev1.BrokerTemplateInstanceList, *applyconfigurationstemplatev1.BrokerTemplateInstanceApplyConfiguration] +} + +// newBrokerTemplateInstances returns a BrokerTemplateInstances +func newBrokerTemplateInstances(c *TemplateV1Client) *brokerTemplateInstances { + return &brokerTemplateInstances{ + gentype.NewClientWithListAndApply[*templatev1.BrokerTemplateInstance, *templatev1.BrokerTemplateInstanceList, *applyconfigurationstemplatev1.BrokerTemplateInstanceApplyConfiguration]( + "brokertemplateinstances", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *templatev1.BrokerTemplateInstance { return 
&templatev1.BrokerTemplateInstance{} }, + func() *templatev1.BrokerTemplateInstanceList { return &templatev1.BrokerTemplateInstanceList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/doc.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/generated_expansion.go new file mode 100644 index 0000000000000..57f72c3538118 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/generated_expansion.go @@ -0,0 +1,9 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type BrokerTemplateInstanceExpansion interface{} + +type TemplateExpansion interface{} + +type TemplateInstanceExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/template.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/template.go new file mode 100644 index 0000000000000..10d60cb28ede0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/template.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + templatev1 "github.com/openshift/api/template/v1" + applyconfigurationstemplatev1 "github.com/openshift/client-go/template/applyconfigurations/template/v1" + scheme "github.com/openshift/client-go/template/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// TemplatesGetter has a method to return a TemplateInterface. +// A group's client should implement this interface. +type TemplatesGetter interface { + Templates(namespace string) TemplateInterface +} + +// TemplateInterface has methods to work with Template resources. 
+type TemplateInterface interface { + Create(ctx context.Context, template *templatev1.Template, opts metav1.CreateOptions) (*templatev1.Template, error) + Update(ctx context.Context, template *templatev1.Template, opts metav1.UpdateOptions) (*templatev1.Template, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*templatev1.Template, error) + List(ctx context.Context, opts metav1.ListOptions) (*templatev1.TemplateList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *templatev1.Template, err error) + Apply(ctx context.Context, template *applyconfigurationstemplatev1.TemplateApplyConfiguration, opts metav1.ApplyOptions) (result *templatev1.Template, err error) + TemplateExpansion +} + +// templates implements TemplateInterface +type templates struct { + *gentype.ClientWithListAndApply[*templatev1.Template, *templatev1.TemplateList, *applyconfigurationstemplatev1.TemplateApplyConfiguration] +} + +// newTemplates returns a Templates +func newTemplates(c *TemplateV1Client, namespace string) *templates { + return &templates{ + gentype.NewClientWithListAndApply[*templatev1.Template, *templatev1.TemplateList, *applyconfigurationstemplatev1.TemplateApplyConfiguration]( + "templates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *templatev1.Template { return &templatev1.Template{} }, + func() *templatev1.TemplateList { return &templatev1.TemplateList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/template_client.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/template_client.go new file mode 100644 index 0000000000000..73c07f0a123e1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/template_client.go @@ -0,0 +1,101 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + templatev1 "github.com/openshift/api/template/v1" + scheme "github.com/openshift/client-go/template/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type TemplateV1Interface interface { + RESTClient() rest.Interface + BrokerTemplateInstancesGetter + TemplatesGetter + TemplateInstancesGetter +} + +// TemplateV1Client is used to interact with features provided by the template.openshift.io group. +type TemplateV1Client struct { + restClient rest.Interface +} + +func (c *TemplateV1Client) BrokerTemplateInstances() BrokerTemplateInstanceInterface { + return newBrokerTemplateInstances(c) +} + +func (c *TemplateV1Client) Templates(namespace string) TemplateInterface { + return newTemplates(c, namespace) +} + +func (c *TemplateV1Client) TemplateInstances(namespace string) TemplateInstanceInterface { + return newTemplateInstances(c, namespace) +} + +// NewForConfig creates a new TemplateV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
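+//
+// Example (an illustrative sketch; cfg is a *rest.Config obtained elsewhere,
+// ctx and the template name are placeholders, and metav1 is
+// k8s.io/apimachinery/pkg/apis/meta/v1):
+//
+//	c, _ := NewForConfig(cfg)
+//	tmpl, _ := c.Templates("openshift").Get(ctx, "my-template", metav1.GetOptions{})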
+func NewForConfig(c *rest.Config) (*TemplateV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new TemplateV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TemplateV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &TemplateV1Client{client}, nil +} + +// NewForConfigOrDie creates a new TemplateV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TemplateV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TemplateV1Client for the given RESTClient. +func New(c rest.Interface) *TemplateV1Client { + return &TemplateV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := templatev1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *TemplateV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/templateinstance.go b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/templateinstance.go new file mode 100644 index 0000000000000..2740031039ca9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/clientset/versioned/typed/template/v1/templateinstance.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + templatev1 "github.com/openshift/api/template/v1" + applyconfigurationstemplatev1 "github.com/openshift/client-go/template/applyconfigurations/template/v1" + scheme "github.com/openshift/client-go/template/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// TemplateInstancesGetter has a method to return a TemplateInstanceInterface. +// A group's client should implement this interface. +type TemplateInstancesGetter interface { + TemplateInstances(namespace string) TemplateInstanceInterface +} + +// TemplateInstanceInterface has methods to work with TemplateInstance resources. +type TemplateInstanceInterface interface { + Create(ctx context.Context, templateInstance *templatev1.TemplateInstance, opts metav1.CreateOptions) (*templatev1.TemplateInstance, error) + Update(ctx context.Context, templateInstance *templatev1.TemplateInstance, opts metav1.UpdateOptions) (*templatev1.TemplateInstance, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, templateInstance *templatev1.TemplateInstance, opts metav1.UpdateOptions) (*templatev1.TemplateInstance, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*templatev1.TemplateInstance, error) + List(ctx context.Context, opts metav1.ListOptions) (*templatev1.TemplateInstanceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *templatev1.TemplateInstance, err error) + Apply(ctx context.Context, templateInstance *applyconfigurationstemplatev1.TemplateInstanceApplyConfiguration, opts metav1.ApplyOptions) (result *templatev1.TemplateInstance, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, templateInstance *applyconfigurationstemplatev1.TemplateInstanceApplyConfiguration, opts metav1.ApplyOptions) (result *templatev1.TemplateInstance, err error) + TemplateInstanceExpansion +} + +// templateInstances implements TemplateInstanceInterface +type templateInstances struct { + *gentype.ClientWithListAndApply[*templatev1.TemplateInstance, *templatev1.TemplateInstanceList, *applyconfigurationstemplatev1.TemplateInstanceApplyConfiguration] +} + +// newTemplateInstances returns a TemplateInstances +func newTemplateInstances(c *TemplateV1Client, namespace string) *templateInstances { + return &templateInstances{ + gentype.NewClientWithListAndApply[*templatev1.TemplateInstance, *templatev1.TemplateInstanceList, *applyconfigurationstemplatev1.TemplateInstanceApplyConfiguration]( + "templateinstances", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *templatev1.TemplateInstance { return &templatev1.TemplateInstance{} }, + func() *templatev1.TemplateInstanceList { return &templatev1.TemplateInstanceList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/factory.go new file mode 100644 index 0000000000000..4a8cfee4b05d3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/template/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/template/informers/externalversions/internalinterfaces" + template "github.com/openshift/client-go/template/informers/externalversions/template" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
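+//
+// Example (an illustrative sketch; client and the option values are
+// placeholders):
+//
+//	factory := NewSharedInformerFactoryWithOptions(client, 10*time.Minute,
+//		WithNamespace("openshift"),
+//		WithTweakListOptions(func(o *v1.ListOptions) { o.LabelSelector = "app=demo" }))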
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+ factory := &sharedInformerFactory{
+ client: client,
+ namespace: v1.NamespaceAll,
+ defaultResync: defaultResync,
+ informers: make(map[reflect.Type]cache.SharedIndexInformer),
+ startedInformers: make(map[reflect.Type]bool),
+ customResync: make(map[reflect.Type]time.Duration),
+ }
+
+ // Apply all options
+ for _, opt := range options {
+ factory = opt(factory)
+ }
+
+ return factory
+}
+
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.shuttingDown {
+ return
+ }
+
+ for informerType, informer := range f.informers {
+ if !f.startedInformers[informerType] {
+ f.wg.Add(1)
+ // We need a new variable in each loop iteration,
+ // otherwise the goroutine would use the loop variable
+ // and that keeps changing.
+ informer := informer
+ go func() {
+ defer f.wg.Done()
+ informer.Run(stopCh)
+ }()
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+ f.lock.Lock()
+ f.shuttingDown = true
+ f.lock.Unlock()
+
+ // Will return immediately if there is nothing to wait for.
+ f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ informer.SetTransform(f.transform)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// defer cancel()
+// factory := NewSharedInformerFactory(client, resyncPeriod)
+// defer factory.Shutdown() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource)
+// typedInformer := factory.SomeAPIGroup().V1().SomeType()
+// factory.Start(ctx.Done()) // Start processing these informers.
+// synced := factory.WaitForCacheSync(ctx.Done())
+// for v, ok := range synced {
+// if !ok {
+// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+// return
+// }
+// }
+//
+// // Informers can also be created after Start, but then
+// // Start must be called again:
+// anotherGenericInformer := factory.ForResource(resource)
+// factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+
+ // Start initializes all requested informers.
+ // They are handled in goroutines
+ // which run until the stop channel gets closed.
+ // Warning: Start does not block. When run in a goroutine, it will race with a later WaitForCacheSync.
+ Start(stopCh <-chan struct{})
+
+ // Shutdown marks a factory as shutting down. At that point no new
+ // informers can be started anymore and Start will return without
+ // doing anything.
+ //
+ // In addition, Shutdown blocks until all goroutines have terminated. For that
+ // to happen, the close channel(s) that they were started with must be closed,
+ // either before Shutdown gets called or while it is waiting.
+ //
+ // Shutdown may be called multiple times, even concurrently. All such calls will
+ // block until all goroutines have terminated.
+ Shutdown()
+
+ // WaitForCacheSync blocks until all started informers' caches were synced
+ // or the stop channel gets closed.
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+ // ForResource gives generic access to a shared informer of the matching type.
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+ // InformerFor returns the SharedIndexInformer for obj using an internal
+ // client.
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+ Template() template.Interface
+}
+
+func (f *sharedInformerFactory) Template() template.Interface {
+ return template.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..a87032f86227e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/generic.go
@@ -0,0 +1,50 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ fmt "fmt"
+
+ v1 "github.com/openshift/api/template/v1"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
+// shared informers based on type.
+type GenericInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+ informer cache.SharedIndexInformer
+ resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+ return f.informer
+}
+
+// Lister returns the GenericLister.
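+//
+// Example use through the factory (an illustrative sketch; factory
+// construction is elided and labels refers to k8s.io/apimachinery/pkg/labels):
+//
+//	gi, _ := factory.ForResource(v1.SchemeGroupVersion.WithResource("templates"))
+//	objs, _ := gi.Lister().ByNamespace("openshift").List(labels.Everything())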
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=template.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("brokertemplateinstances"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Template().V1().BrokerTemplateInstances().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("templates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Template().V1().Templates().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("templateinstances"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Template().V1().TemplateInstances().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..1b2aea92f99c3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/template/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/template/interface.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/interface.go new file mode 100644 index 0000000000000..a06ae741861d8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package template + +import ( + internalinterfaces "github.com/openshift/client-go/template/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/template/informers/externalversions/template/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/brokertemplateinstance.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/brokertemplateinstance.go new file mode 100644 index 0000000000000..5bbd9f380f1e8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/brokertemplateinstance.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apitemplatev1 "github.com/openshift/api/template/v1" + versioned "github.com/openshift/client-go/template/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/template/informers/externalversions/internalinterfaces" + templatev1 "github.com/openshift/client-go/template/listers/template/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BrokerTemplateInstanceInformer provides access to a shared informer and lister for +// BrokerTemplateInstances. +type BrokerTemplateInstanceInformer interface { + Informer() cache.SharedIndexInformer + Lister() templatev1.BrokerTemplateInstanceLister +} + +type brokerTemplateInstanceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewBrokerTemplateInstanceInformer constructs a new informer for BrokerTemplateInstance type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBrokerTemplateInstanceInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBrokerTemplateInstanceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredBrokerTemplateInstanceInformer constructs a new informer for BrokerTemplateInstance type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBrokerTemplateInstanceInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TemplateV1().BrokerTemplateInstances().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TemplateV1().BrokerTemplateInstances().Watch(context.TODO(), options) + }, + }, + &apitemplatev1.BrokerTemplateInstance{}, + resyncPeriod, + indexers, + ) +} + +func (f *brokerTemplateInstanceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBrokerTemplateInstanceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *brokerTemplateInstanceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apitemplatev1.BrokerTemplateInstance{}, f.defaultInformer) +} + +func (f *brokerTemplateInstanceInformer) Lister() templatev1.BrokerTemplateInstanceLister { + return templatev1.NewBrokerTemplateInstanceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/interface.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/interface.go new file mode 100644 index 0000000000000..19f80d3c9a926 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/interface.go @@ -0,0 +1,43 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/template/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // BrokerTemplateInstances returns a BrokerTemplateInstanceInformer. + BrokerTemplateInstances() BrokerTemplateInstanceInformer + // Templates returns a TemplateInformer. + Templates() TemplateInformer + // TemplateInstances returns a TemplateInstanceInformer. + TemplateInstances() TemplateInstanceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// BrokerTemplateInstances returns a BrokerTemplateInstanceInformer. +func (v *version) BrokerTemplateInstances() BrokerTemplateInstanceInformer { + return &brokerTemplateInstanceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Templates returns a TemplateInformer. +func (v *version) Templates() TemplateInformer { + return &templateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// TemplateInstances returns a TemplateInstanceInformer. 
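In normal use the NewFiltered* constructors above are not called directly; consumers ask the shared factory for the informer so that a single watch per type is multiplexed to every handler, as the doc comments recommend. A short sketch, assuming the standard generated factory entry point in the externalversions package; the resync period and handler body are illustrative.

package main

import (
	"fmt"
	"time"

	templatev1 "github.com/openshift/api/template/v1"
	versioned "github.com/openshift/client-go/template/clientset/versioned"
	"github.com/openshift/client-go/template/informers/externalversions"
	"k8s.io/client-go/tools/cache"
)

// watchBrokerTemplateInstances registers an add handler on the shared
// BrokerTemplateInstance informer and blocks until its cache has synced.
func watchBrokerTemplateInstances(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)
	informer := factory.Template().V1().BrokerTemplateInstances().Informer()

	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			bti := obj.(*templatev1.BrokerTemplateInstance)
			fmt.Println("broker template instance added:", bti.Name)
		},
	})

	factory.Start(stopCh) // runs each requested informer in its own goroutine
	cache.WaitForCacheSync(stopCh, informer.HasSynced)
}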
+func (v *version) TemplateInstances() TemplateInstanceInformer { + return &templateInstanceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/template.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/template.go new file mode 100644 index 0000000000000..600596951a939 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/template.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apitemplatev1 "github.com/openshift/api/template/v1" + versioned "github.com/openshift/client-go/template/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/template/informers/externalversions/internalinterfaces" + templatev1 "github.com/openshift/client-go/template/listers/template/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// TemplateInformer provides access to a shared informer and lister for +// Templates. +type TemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() templatev1.TemplateLister +} + +type templateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTemplateInformer constructs a new informer for Template type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewTemplateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTemplateInformer constructs a new informer for Template type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
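Every constructor in this group threads a TweakListOptionsFunc through to its List and Watch calls, which is the hook for server-side filtering. The sketch below scopes a Template informer by label; NewSharedInformerFactoryWithOptions and WithTweakListOptions are assumed from the standard generated factory, and the selector value is made up for illustration.

package main

import (
	"time"

	versioned "github.com/openshift/client-go/template/clientset/versioned"
	"github.com/openshift/client-go/template/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// labelScopedTemplateInformer returns a Template informer whose List and
// Watch requests carry a label selector, so only matching objects are
// ever cached or delivered to handlers.
func labelScopedTemplateInformer(client versioned.Interface) cache.SharedIndexInformer {
	tweak := func(options *metav1.ListOptions) {
		options.LabelSelector = "templates.example.com/curated=true" // illustrative
	}
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute,
		externalversions.WithTweakListOptions(tweak),
	)
	return factory.Template().V1().Templates().Informer()
}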
+func NewFilteredTemplateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TemplateV1().Templates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TemplateV1().Templates(namespace).Watch(context.TODO(), options) + }, + }, + &apitemplatev1.Template{}, + resyncPeriod, + indexers, + ) +} + +func (f *templateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *templateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apitemplatev1.Template{}, f.defaultInformer) +} + +func (f *templateInformer) Lister() templatev1.TemplateLister { + return templatev1.NewTemplateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/templateinstance.go b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/templateinstance.go new file mode 100644 index 0000000000000..cc3a5afa8afbc --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/informers/externalversions/template/v1/templateinstance.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apitemplatev1 "github.com/openshift/api/template/v1" + versioned "github.com/openshift/client-go/template/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/template/informers/externalversions/internalinterfaces" + templatev1 "github.com/openshift/client-go/template/listers/template/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// TemplateInstanceInformer provides access to a shared informer and lister for +// TemplateInstances. +type TemplateInstanceInformer interface { + Informer() cache.SharedIndexInformer + Lister() templatev1.TemplateInstanceLister +} + +type templateInstanceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTemplateInstanceInformer constructs a new informer for TemplateInstance type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewTemplateInstanceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTemplateInstanceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTemplateInstanceInformer constructs a new informer for TemplateInstance type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredTemplateInstanceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TemplateV1().TemplateInstances(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TemplateV1().TemplateInstances(namespace).Watch(context.TODO(), options) + }, + }, + &apitemplatev1.TemplateInstance{}, + resyncPeriod, + indexers, + ) +} + +func (f *templateInstanceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTemplateInstanceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *templateInstanceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apitemplatev1.TemplateInstance{}, f.defaultInformer) +} + +func (f *templateInstanceInformer) Lister() templatev1.TemplateInstanceLister { + return templatev1.NewTemplateInstanceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/template/listers/template/v1/brokertemplateinstance.go b/vendor/github.com/openshift/client-go/template/listers/template/v1/brokertemplateinstance.go new file mode 100644 index 0000000000000..d854917cd7628 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/listers/template/v1/brokertemplateinstance.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + templatev1 "github.com/openshift/api/template/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// BrokerTemplateInstanceLister helps list BrokerTemplateInstances. +// All objects returned here must be treated as read-only. +type BrokerTemplateInstanceLister interface { + // List lists all BrokerTemplateInstances in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*templatev1.BrokerTemplateInstance, err error) + // Get retrieves the BrokerTemplateInstance from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*templatev1.BrokerTemplateInstance, error) + BrokerTemplateInstanceListerExpansion +} + +// brokerTemplateInstanceLister implements the BrokerTemplateInstanceLister interface. +type brokerTemplateInstanceLister struct { + listers.ResourceIndexer[*templatev1.BrokerTemplateInstance] +} + +// NewBrokerTemplateInstanceLister returns a new BrokerTemplateInstanceLister. 
+func NewBrokerTemplateInstanceLister(indexer cache.Indexer) BrokerTemplateInstanceLister { + return &brokerTemplateInstanceLister{listers.New[*templatev1.BrokerTemplateInstance](indexer, templatev1.Resource("brokertemplateinstance"))} +} diff --git a/vendor/github.com/openshift/client-go/template/listers/template/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/template/listers/template/v1/expansion_generated.go new file mode 100644 index 0000000000000..9ed553b5a5739 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/listers/template/v1/expansion_generated.go @@ -0,0 +1,23 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BrokerTemplateInstanceListerExpansion allows custom methods to be added to +// BrokerTemplateInstanceLister. +type BrokerTemplateInstanceListerExpansion interface{} + +// TemplateListerExpansion allows custom methods to be added to +// TemplateLister. +type TemplateListerExpansion interface{} + +// TemplateNamespaceListerExpansion allows custom methods to be added to +// TemplateNamespaceLister. +type TemplateNamespaceListerExpansion interface{} + +// TemplateInstanceListerExpansion allows custom methods to be added to +// TemplateInstanceLister. +type TemplateInstanceListerExpansion interface{} + +// TemplateInstanceNamespaceListerExpansion allows custom methods to be added to +// TemplateInstanceNamespaceLister. +type TemplateInstanceNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/template/listers/template/v1/template.go b/vendor/github.com/openshift/client-go/template/listers/template/v1/template.go new file mode 100644 index 0000000000000..0f84e2d4f4352 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/listers/template/v1/template.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + templatev1 "github.com/openshift/api/template/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// TemplateLister helps list Templates. +// All objects returned here must be treated as read-only. +type TemplateLister interface { + // List lists all Templates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*templatev1.Template, err error) + // Templates returns an object that can list and get Templates. + Templates(namespace string) TemplateNamespaceLister + TemplateListerExpansion +} + +// templateLister implements the TemplateLister interface. +type templateLister struct { + listers.ResourceIndexer[*templatev1.Template] +} + +// NewTemplateLister returns a new TemplateLister. +func NewTemplateLister(indexer cache.Indexer) TemplateLister { + return &templateLister{listers.New[*templatev1.Template](indexer, templatev1.Resource("template"))} +} + +// Templates returns an object that can list and get Templates. +func (s *templateLister) Templates(namespace string) TemplateNamespaceLister { + return templateNamespaceLister{listers.NewNamespaced[*templatev1.Template](s.ResourceIndexer, namespace)} +} + +// TemplateNamespaceLister helps list and get Templates. +// All objects returned here must be treated as read-only. +type TemplateNamespaceLister interface { + // List lists all Templates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*templatev1.Template, err error) + // Get retrieves the Template from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*templatev1.Template, error) + TemplateNamespaceListerExpansion +} + +// templateNamespaceLister implements the TemplateNamespaceLister +// interface. +type templateNamespaceLister struct { + listers.ResourceIndexer[*templatev1.Template] +} diff --git a/vendor/github.com/openshift/client-go/template/listers/template/v1/templateinstance.go b/vendor/github.com/openshift/client-go/template/listers/template/v1/templateinstance.go new file mode 100644 index 0000000000000..3bc83c4751fd6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/template/listers/template/v1/templateinstance.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + templatev1 "github.com/openshift/api/template/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// TemplateInstanceLister helps list TemplateInstances. +// All objects returned here must be treated as read-only. +type TemplateInstanceLister interface { + // List lists all TemplateInstances in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*templatev1.TemplateInstance, err error) + // TemplateInstances returns an object that can list and get TemplateInstances. + TemplateInstances(namespace string) TemplateInstanceNamespaceLister + TemplateInstanceListerExpansion +} + +// templateInstanceLister implements the TemplateInstanceLister interface. +type templateInstanceLister struct { + listers.ResourceIndexer[*templatev1.TemplateInstance] +} + +// NewTemplateInstanceLister returns a new TemplateInstanceLister. +func NewTemplateInstanceLister(indexer cache.Indexer) TemplateInstanceLister { + return &templateInstanceLister{listers.New[*templatev1.TemplateInstance](indexer, templatev1.Resource("templateinstance"))} +} + +// TemplateInstances returns an object that can list and get TemplateInstances. +func (s *templateInstanceLister) TemplateInstances(namespace string) TemplateInstanceNamespaceLister { + return templateInstanceNamespaceLister{listers.NewNamespaced[*templatev1.TemplateInstance](s.ResourceIndexer, namespace)} +} + +// TemplateInstanceNamespaceLister helps list and get TemplateInstances. +// All objects returned here must be treated as read-only. +type TemplateInstanceNamespaceLister interface { + // List lists all TemplateInstances in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*templatev1.TemplateInstance, err error) + // Get retrieves the TemplateInstance from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*templatev1.TemplateInstance, error) + TemplateInstanceNamespaceListerExpansion +} + +// templateInstanceNamespaceLister implements the TemplateInstanceNamespaceLister +// interface. 
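The lister pair above gives two read paths over the same index: a cluster-wide List and a namespace-scoped Get, both answered from the local cache rather than the API server. A small sketch; the namespace and template name are illustrative, and the factory is the generated one from externalversions.

package main

import (
	"fmt"

	"github.com/openshift/client-go/template/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// readTemplatesFromCache lists all cached Templates and then fetches one
// by namespace and name. Both calls hit the informer's index only, and
// the returned objects must be treated as read-only.
func readTemplatesFromCache(factory externalversions.SharedInformerFactory) error {
	lister := factory.Template().V1().Templates().Lister()

	all, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("%d templates in cache\n", len(all))

	tmpl, err := lister.Templates("openshift").Get("ruby-ex") // illustrative names
	if err != nil {
		return err // NotFound here means "not in the cache"
	}
	fmt.Println("found:", tmpl.Name)
	return nil
}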
+type templateInstanceNamespaceLister struct { + listers.ResourceIndexer[*templatev1.TemplateInstance] +} diff --git a/vendor/github.com/openshift/client-go/user/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/user/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000000..55c7dfa0d43d3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/applyconfigurations/internal/internal.go @@ -0,0 +1,275 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.user.v1.Group + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: users + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.user.v1.Identity + map: + fields: + - name: apiVersion + type: + scalar: string + - name: extra + type: + map: + elementType: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: providerName + type: + scalar: string + default: "" + - name: providerUserName + type: + scalar: string + default: "" + - name: user + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} +- name: com.github.openshift.api.user.v1.User + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fullName + type: + scalar: string + - name: groups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: identities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: 
annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/group.go b/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/group.go new file mode 100644 index 0000000000000..c6cdc9a09d702 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/group.go @@ -0,0 +1,237 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + userv1 "github.com/openshift/api/user/v1" + internal "github.com/openshift/client-go/user/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// GroupApplyConfiguration represents a declarative configuration of the Group type for use +// with apply. +type GroupApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Users *userv1.OptionalNames `json:"users,omitempty"` +} + +// Group constructs a declarative configuration of the Group type for use with +// apply. +func Group(name string) *GroupApplyConfiguration { + b := &GroupApplyConfiguration{} + b.WithName(name) + b.WithKind("Group") + b.WithAPIVersion("user.openshift.io/v1") + return b +} + +// ExtractGroup extracts the applied configuration owned by fieldManager from +// group. 
If no managedFields are found in group for fieldManager, a +// GroupApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// group must be a unmodified Group API object that was retrieved from the Kubernetes API. +// ExtractGroup provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractGroup(group *userv1.Group, fieldManager string) (*GroupApplyConfiguration, error) { + return extractGroup(group, fieldManager, "") +} + +// ExtractGroupStatus is the same as ExtractGroup except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractGroupStatus(group *userv1.Group, fieldManager string) (*GroupApplyConfiguration, error) { + return extractGroup(group, fieldManager, "status") +} + +func extractGroup(group *userv1.Group, fieldManager string, subresource string) (*GroupApplyConfiguration, error) { + b := &GroupApplyConfiguration{} + err := managedfields.ExtractInto(group, internal.Parser().Type("com.github.openshift.api.user.v1.Group"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(group.Name) + + b.WithKind("Group") + b.WithAPIVersion("user.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithKind(value string) *GroupApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithAPIVersion(value string) *GroupApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithName(value string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *GroupApplyConfiguration) WithGenerateName(value string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithNamespace(value string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithUID(value types.UID) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithResourceVersion(value string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithGeneration(value int64) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *GroupApplyConfiguration) WithLabels(entries map[string]string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *GroupApplyConfiguration) WithAnnotations(entries map[string]string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *GroupApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *GroupApplyConfiguration) WithFinalizers(values ...string) *GroupApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *GroupApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithUsers sets the Users field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Users field is set to the value of the last call. +func (b *GroupApplyConfiguration) WithUsers(value userv1.OptionalNames) *GroupApplyConfiguration { + b.Users = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *GroupApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/identity.go b/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/identity.go new file mode 100644 index 0000000000000..b957fbc175dbf --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/identity.go @@ -0,0 +1,271 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + userv1 "github.com/openshift/api/user/v1" + internal "github.com/openshift/client-go/user/applyconfigurations/internal" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IdentityApplyConfiguration represents a declarative configuration of the Identity type for use +// with apply. +type IdentityApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + ProviderName *string `json:"providerName,omitempty"` + ProviderUserName *string `json:"providerUserName,omitempty"` + User *corev1.ObjectReference `json:"user,omitempty"` + Extra map[string]string `json:"extra,omitempty"` +} + +// Identity constructs a declarative configuration of the Identity type for use with +// apply. +func Identity(name string) *IdentityApplyConfiguration { + b := &IdentityApplyConfiguration{} + b.WithName(name) + b.WithKind("Identity") + b.WithAPIVersion("user.openshift.io/v1") + return b +} + +// ExtractIdentity extracts the applied configuration owned by fieldManager from +// identity. If no managedFields are found in identity for fieldManager, a +// IdentityApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// identity must be a unmodified Identity API object that was retrieved from the Kubernetes API. +// ExtractIdentity provides a way to perform a extract/modify-in-place/apply workflow. 
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIdentity(identity *userv1.Identity, fieldManager string) (*IdentityApplyConfiguration, error) { + return extractIdentity(identity, fieldManager, "") +} + +// ExtractIdentityStatus is the same as ExtractIdentity except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractIdentityStatus(identity *userv1.Identity, fieldManager string) (*IdentityApplyConfiguration, error) { + return extractIdentity(identity, fieldManager, "status") +} + +func extractIdentity(identity *userv1.Identity, fieldManager string, subresource string) (*IdentityApplyConfiguration, error) { + b := &IdentityApplyConfiguration{} + err := managedfields.ExtractInto(identity, internal.Parser().Type("com.github.openshift.api.user.v1.Identity"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(identity.Name) + + b.WithKind("Identity") + b.WithAPIVersion("user.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithKind(value string) *IdentityApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithAPIVersion(value string) *IdentityApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithName(value string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithGenerateName(value string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *IdentityApplyConfiguration) WithNamespace(value string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithUID(value types.UID) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithResourceVersion(value string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithGeneration(value int64) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *IdentityApplyConfiguration) WithLabels(entries map[string]string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *IdentityApplyConfiguration) WithAnnotations(entries map[string]string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IdentityApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *IdentityApplyConfiguration) WithFinalizers(values ...string) *IdentityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *IdentityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithProviderName sets the ProviderName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProviderName field is set to the value of the last call. 
+func (b *IdentityApplyConfiguration) WithProviderName(value string) *IdentityApplyConfiguration { + b.ProviderName = &value + return b +} + +// WithProviderUserName sets the ProviderUserName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProviderUserName field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithProviderUserName(value string) *IdentityApplyConfiguration { + b.ProviderUserName = &value + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *IdentityApplyConfiguration) WithUser(value corev1.ObjectReference) *IdentityApplyConfiguration { + b.User = &value + return b +} + +// WithExtra puts the entries into the Extra field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Extra field, +// overwriting an existing map entries in Extra field with the same key. +func (b *IdentityApplyConfiguration) WithExtra(entries map[string]string) *IdentityApplyConfiguration { + if b.Extra == nil && len(entries) > 0 { + b.Extra = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Extra[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IdentityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/user.go b/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/user.go new file mode 100644 index 0000000000000..ad2216d9e8fcb --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/applyconfigurations/user/v1/user.go @@ -0,0 +1,259 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + userv1 "github.com/openshift/api/user/v1" + internal "github.com/openshift/client-go/user/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// UserApplyConfiguration represents a declarative configuration of the User type for use +// with apply. +type UserApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + FullName *string `json:"fullName,omitempty"` + Identities []string `json:"identities,omitempty"` + Groups []string `json:"groups,omitempty"` +} + +// User constructs a declarative configuration of the User type for use with +// apply. +func User(name string) *UserApplyConfiguration { + b := &UserApplyConfiguration{} + b.WithName(name) + b.WithKind("User") + b.WithAPIVersion("user.openshift.io/v1") + return b +} + +// ExtractUser extracts the applied configuration owned by fieldManager from +// user. 
If no managedFields are found in user for fieldManager, a +// UserApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// user must be a unmodified User API object that was retrieved from the Kubernetes API. +// ExtractUser provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractUser(user *userv1.User, fieldManager string) (*UserApplyConfiguration, error) { + return extractUser(user, fieldManager, "") +} + +// ExtractUserStatus is the same as ExtractUser except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractUserStatus(user *userv1.User, fieldManager string) (*UserApplyConfiguration, error) { + return extractUser(user, fieldManager, "status") +} + +func extractUser(user *userv1.User, fieldManager string, subresource string) (*UserApplyConfiguration, error) { + b := &UserApplyConfiguration{} + err := managedfields.ExtractInto(user, internal.Parser().Type("com.github.openshift.api.user.v1.User"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(user.Name) + + b.WithKind("User") + b.WithAPIVersion("user.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *UserApplyConfiguration) WithKind(value string) *UserApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *UserApplyConfiguration) WithAPIVersion(value string) *UserApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *UserApplyConfiguration) WithName(value string) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *UserApplyConfiguration) WithGenerateName(value string) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *UserApplyConfiguration) WithNamespace(value string) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *UserApplyConfiguration) WithUID(value types.UID) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *UserApplyConfiguration) WithResourceVersion(value string) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *UserApplyConfiguration) WithGeneration(value int64) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *UserApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *UserApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *UserApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *UserApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *UserApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *UserApplyConfiguration) WithLabels(entries map[string]string) *UserApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *UserApplyConfiguration) WithAnnotations(entries map[string]string) *UserApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *UserApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *UserApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
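+
+// The merge semantics above can be seen in a short sketch (illustrative only,
+// not part of the generated file): repeated WithLabels calls merge maps and
+// overwrite clashing keys, while slice-valued builders such as WithGroups
+// (defined below) append on every call.
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		applyuserv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1"
+//	)
+//
+//	func main() {
+//		ac := &applyuserv1.UserApplyConfiguration{}
+//		ac.WithLabels(map[string]string{"team": "a", "env": "dev"})
+//		ac.WithLabels(map[string]string{"team": "b"}) // "team" overwritten, "env" kept
+//		ac.WithGroups("devs").WithGroups("ops")       // appends: ["devs", "ops"]
+//		fmt.Println(ac.Labels, ac.Groups)             // map[env:dev team:b] [devs ops]
+//	}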
+func (b *UserApplyConfiguration) WithFinalizers(values ...string) *UserApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *UserApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithFullName sets the FullName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FullName field is set to the value of the last call.
+func (b *UserApplyConfiguration) WithFullName(value string) *UserApplyConfiguration {
+	b.FullName = &value
+	return b
+}
+
+// WithIdentities adds the given value to the Identities field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Identities field.
+func (b *UserApplyConfiguration) WithIdentities(values ...string) *UserApplyConfiguration {
+	for i := range values {
+		b.Identities = append(b.Identities, values[i])
+	}
+	return b
+}
+
+// WithGroups adds the given value to the Groups field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Groups field.
+func (b *UserApplyConfiguration) WithGroups(values ...string) *UserApplyConfiguration {
+	for i := range values {
+		b.Groups = append(b.Groups, values[i])
+	}
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *UserApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/user/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/user/applyconfigurations/utils.go
new file mode 100644
index 0000000000000..33c0a2d2adfc9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/user/applyconfigurations/utils.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package applyconfigurations
+
+import (
+	v1 "github.com/openshift/api/user/v1"
+	internal "github.com/openshift/client-go/user/applyconfigurations/internal"
+	userv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	testing "k8s.io/client-go/testing"
+)
+
+// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no
+// apply configuration type exists for the given GroupVersionKind.
+func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=user.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("Group"): + return &userv1.GroupApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Identity"): + return &userv1.IdentityApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("User"): + return &userv1.UserApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..bb482196b09f8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + fmt "fmt" + http "net/http" + + userv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + UserV1() userv1.UserV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + userV1 *userv1.UserV1Client +} + +// UserV1 retrieves the UserV1Client +func (c *Clientset) UserV1() userv1.UserV1Interface { + return c.userV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.userV1, err = userv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.userV1 = userv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000000..188181df8f57e --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + applyconfigurations "github.com/openshift/client-go/user/applyconfigurations" + clientset "github.com/openshift/client-go/user/clientset/versioned" + userv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + fakeuserv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). 
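+
+// Putting the constructors above together, a minimal sketch of building the
+// clientset from a kubeconfig and listing users (illustrative only, not part
+// of the generated file; the kubeconfig path is a placeholder):
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"fmt"
+//
+//		userclient "github.com/openshift/client-go/user/clientset/versioned"
+//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+//		"k8s.io/client-go/tools/clientcmd"
+//	)
+//
+//	func main() {
+//		cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
+//		if err != nil {
+//			panic(err)
+//		}
+//		cs, err := userclient.NewForConfig(cfg) // rate limiter derived from QPS/Burst when unset
+//		if err != nil {
+//			panic(err)
+//		}
+//		users, err := cs.UserV1().Users().List(context.TODO(), metav1.ListOptions{})
+//		if err != nil {
+//			panic(err)
+//		}
+//		for _, u := range users.Items {
+//			fmt.Println(u.Name)
+//		}
+//	}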
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// UserV1 retrieves the UserV1Client +func (c *Clientset) UserV1() userv1.UserV1Interface { + return &fakeuserv1.FakeUserV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000000..3630ed1cd17db --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/register.go new file mode 100644 index 0000000000000..34eafed42432d --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. 
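+
+// A sketch of how the field-managed NewClientset above might back a unit test
+// of server-side apply (illustrative only, not part of the generated file;
+// the manager name "test" and the user "alice" are placeholders):
+//
+//	package example
+//
+//	import (
+//		"context"
+//		"testing"
+//
+//		applyuserv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1"
+//		"github.com/openshift/client-go/user/clientset/versioned/fake"
+//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+//	)
+//
+//	func TestApplyUser(t *testing.T) {
+//		cs := fake.NewClientset()
+//		ac := (&applyuserv1.UserApplyConfiguration{}).
+//			WithAPIVersion("user.openshift.io/v1").
+//			WithKind("User").
+//			WithName("alice").
+//			WithFullName("Alice Example")
+//		u, err := cs.UserV1().Users().Apply(context.TODO(), ac, metav1.ApplyOptions{FieldManager: "test", Force: true})
+//		if err != nil {
+//			t.Fatal(err)
+//		}
+//		if u.FullName != "Alice Example" {
+//			t.Fatalf("unexpected full name %q", u.FullName)
+//		}
+//	}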
+ +package fake + +import ( + userv1 "github.com/openshift/api/user/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + userv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000000..14db57a58f8d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..736da01969f03 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + userv1 "github.com/openshift/api/user/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + userv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/doc.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/doc.go new file mode 100644 index 0000000000000..225e6b2be34f2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/doc.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/doc.go new file mode 100644 index 0000000000000..2b5ba4c8e4422 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_group.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_group.go new file mode 100644 index 0000000000000..c5d9173f5de9c --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_group.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/user/v1" + userv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1" + typeduserv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeGroups implements GroupInterface +type fakeGroups struct { + *gentype.FakeClientWithListAndApply[*v1.Group, *v1.GroupList, *userv1.GroupApplyConfiguration] + Fake *FakeUserV1 +} + +func newFakeGroups(fake *FakeUserV1) typeduserv1.GroupInterface { + return &fakeGroups{ + gentype.NewFakeClientWithListAndApply[*v1.Group, *v1.GroupList, *userv1.GroupApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("groups"), + v1.SchemeGroupVersion.WithKind("Group"), + func() *v1.Group { return &v1.Group{} }, + func() *v1.GroupList { return &v1.GroupList{} }, + func(dst, src *v1.GroupList) { dst.ListMeta = src.ListMeta }, + func(list *v1.GroupList) []*v1.Group { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.GroupList, items []*v1.Group) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_identity.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_identity.go new file mode 100644 index 0000000000000..6295243705660 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_identity.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/user/v1" + userv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1" + typeduserv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeIdentities implements IdentityInterface +type fakeIdentities struct { + *gentype.FakeClientWithListAndApply[*v1.Identity, *v1.IdentityList, *userv1.IdentityApplyConfiguration] + Fake *FakeUserV1 +} + +func newFakeIdentities(fake *FakeUserV1) typeduserv1.IdentityInterface { + return &fakeIdentities{ + gentype.NewFakeClientWithListAndApply[*v1.Identity, *v1.IdentityList, *userv1.IdentityApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("identities"), + v1.SchemeGroupVersion.WithKind("Identity"), + func() *v1.Identity { return &v1.Identity{} }, + func() *v1.IdentityList { return &v1.IdentityList{} }, + func(dst, src *v1.IdentityList) { dst.ListMeta = src.ListMeta }, + func(list *v1.IdentityList) []*v1.Identity { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.IdentityList, items []*v1.Identity) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_user.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_user.go new file mode 100644 index 0000000000000..9bea614871c7e --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_user.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/user/v1" + userv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1" + typeduserv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeUsers implements UserInterface +type fakeUsers struct { + *gentype.FakeClientWithListAndApply[*v1.User, *v1.UserList, *userv1.UserApplyConfiguration] + Fake *FakeUserV1 +} + +func newFakeUsers(fake *FakeUserV1) typeduserv1.UserInterface { + return &fakeUsers{ + gentype.NewFakeClientWithListAndApply[*v1.User, *v1.UserList, *userv1.UserApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("users"), + v1.SchemeGroupVersion.WithKind("User"), + func() *v1.User { return &v1.User{} }, + func() *v1.UserList { return &v1.UserList{} }, + func(dst, src *v1.UserList) { dst.ListMeta = src.ListMeta }, + func(list *v1.UserList) []*v1.User { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.UserList, items []*v1.User) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_user_client.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_user_client.go new file mode 100644 index 0000000000000..4a7f8189c91e3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_user_client.go @@ -0,0 +1,36 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeUserV1 struct { + *testing.Fake +} + +func (c *FakeUserV1) Groups() v1.GroupInterface { + return newFakeGroups(c) +} + +func (c *FakeUserV1) Identities() v1.IdentityInterface { + return newFakeIdentities(c) +} + +func (c *FakeUserV1) Users() v1.UserInterface { + return newFakeUsers(c) +} + +func (c *FakeUserV1) UserIdentityMappings() v1.UserIdentityMappingInterface { + return newFakeUserIdentityMappings(c) +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeUserV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_useridentitymapping.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_useridentitymapping.go new file mode 100644 index 0000000000000..453f876b104c5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake/fake_useridentitymapping.go @@ -0,0 +1,28 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/api/user/v1" + userv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeUserIdentityMappings implements UserIdentityMappingInterface +type fakeUserIdentityMappings struct { + *gentype.FakeClient[*v1.UserIdentityMapping] + Fake *FakeUserV1 +} + +func newFakeUserIdentityMappings(fake *FakeUserV1) userv1.UserIdentityMappingInterface { + return &fakeUserIdentityMappings{ + gentype.NewFakeClient[*v1.UserIdentityMapping]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("useridentitymappings"), + v1.SchemeGroupVersion.WithKind("UserIdentityMapping"), + func() *v1.UserIdentityMapping { return &v1.UserIdentityMapping{} }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/generated_expansion.go new file mode 100644 index 0000000000000..11d78bf5b4c58 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/generated_expansion.go @@ -0,0 +1,11 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type GroupExpansion interface{} + +type IdentityExpansion interface{} + +type UserExpansion interface{} + +type UserIdentityMappingExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/group.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/group.go new file mode 100644 index 0000000000000..4b73ce61d80c8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/group.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + context "context" + + userv1 "github.com/openshift/api/user/v1" + applyconfigurationsuserv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1" + scheme "github.com/openshift/client-go/user/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// GroupsGetter has a method to return a GroupInterface. +// A group's client should implement this interface. +type GroupsGetter interface { + Groups() GroupInterface +} + +// GroupInterface has methods to work with Group resources. +type GroupInterface interface { + Create(ctx context.Context, group *userv1.Group, opts metav1.CreateOptions) (*userv1.Group, error) + Update(ctx context.Context, group *userv1.Group, opts metav1.UpdateOptions) (*userv1.Group, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*userv1.Group, error) + List(ctx context.Context, opts metav1.ListOptions) (*userv1.GroupList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *userv1.Group, err error) + Apply(ctx context.Context, group *applyconfigurationsuserv1.GroupApplyConfiguration, opts metav1.ApplyOptions) (result *userv1.Group, err error) + GroupExpansion +} + +// groups implements GroupInterface +type groups struct { + *gentype.ClientWithListAndApply[*userv1.Group, *userv1.GroupList, *applyconfigurationsuserv1.GroupApplyConfiguration] +} + +// newGroups returns a Groups +func newGroups(c *UserV1Client) *groups { + return &groups{ + gentype.NewClientWithListAndApply[*userv1.Group, *userv1.GroupList, *applyconfigurationsuserv1.GroupApplyConfiguration]( + "groups", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *userv1.Group { return &userv1.Group{} }, + func() *userv1.GroupList { return &userv1.GroupList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/identity.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/identity.go new file mode 100644 index 0000000000000..e45938efb6c24 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/identity.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + userv1 "github.com/openshift/api/user/v1" + applyconfigurationsuserv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1" + scheme "github.com/openshift/client-go/user/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// IdentitiesGetter has a method to return a IdentityInterface. +// A group's client should implement this interface. +type IdentitiesGetter interface { + Identities() IdentityInterface +} + +// IdentityInterface has methods to work with Identity resources. 
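+
+// A sketch of typed CRUD against Groups via the interface above (illustrative
+// only, not part of the generated file; the group name and members are
+// placeholders, and the Group type's Users field of type OptionalNames comes
+// from github.com/openshift/api/user/v1):
+//
+//	package main
+//
+//	import (
+//		"context"
+//
+//		userv1 "github.com/openshift/api/user/v1"
+//		userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
+//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+//	)
+//
+//	func ensureGroup(ctx context.Context, c userv1client.UserV1Interface) error {
+//		g := &userv1.Group{
+//			ObjectMeta: metav1.ObjectMeta{Name: "devs"},
+//			Users:      userv1.OptionalNames{"alice", "bob"},
+//		}
+//		_, err := c.Groups().Create(ctx, g, metav1.CreateOptions{})
+//		return err
+//	}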
+type IdentityInterface interface { + Create(ctx context.Context, identity *userv1.Identity, opts metav1.CreateOptions) (*userv1.Identity, error) + Update(ctx context.Context, identity *userv1.Identity, opts metav1.UpdateOptions) (*userv1.Identity, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*userv1.Identity, error) + List(ctx context.Context, opts metav1.ListOptions) (*userv1.IdentityList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *userv1.Identity, err error) + Apply(ctx context.Context, identity *applyconfigurationsuserv1.IdentityApplyConfiguration, opts metav1.ApplyOptions) (result *userv1.Identity, err error) + IdentityExpansion +} + +// identities implements IdentityInterface +type identities struct { + *gentype.ClientWithListAndApply[*userv1.Identity, *userv1.IdentityList, *applyconfigurationsuserv1.IdentityApplyConfiguration] +} + +// newIdentities returns a Identities +func newIdentities(c *UserV1Client) *identities { + return &identities{ + gentype.NewClientWithListAndApply[*userv1.Identity, *userv1.IdentityList, *applyconfigurationsuserv1.IdentityApplyConfiguration]( + "identities", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *userv1.Identity { return &userv1.Identity{} }, + func() *userv1.IdentityList { return &userv1.IdentityList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/user.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/user.go new file mode 100644 index 0000000000000..7eabfb651ea8d --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/user.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + userv1 "github.com/openshift/api/user/v1" + applyconfigurationsuserv1 "github.com/openshift/client-go/user/applyconfigurations/user/v1" + scheme "github.com/openshift/client-go/user/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// UsersGetter has a method to return a UserInterface. +// A group's client should implement this interface. +type UsersGetter interface { + Users() UserInterface +} + +// UserInterface has methods to work with User resources. 
+type UserInterface interface { + Create(ctx context.Context, user *userv1.User, opts metav1.CreateOptions) (*userv1.User, error) + Update(ctx context.Context, user *userv1.User, opts metav1.UpdateOptions) (*userv1.User, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*userv1.User, error) + List(ctx context.Context, opts metav1.ListOptions) (*userv1.UserList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *userv1.User, err error) + Apply(ctx context.Context, user *applyconfigurationsuserv1.UserApplyConfiguration, opts metav1.ApplyOptions) (result *userv1.User, err error) + UserExpansion +} + +// users implements UserInterface +type users struct { + *gentype.ClientWithListAndApply[*userv1.User, *userv1.UserList, *applyconfigurationsuserv1.UserApplyConfiguration] +} + +// newUsers returns a Users +func newUsers(c *UserV1Client) *users { + return &users{ + gentype.NewClientWithListAndApply[*userv1.User, *userv1.UserList, *applyconfigurationsuserv1.UserApplyConfiguration]( + "users", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *userv1.User { return &userv1.User{} }, + func() *userv1.UserList { return &userv1.UserList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/user_client.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/user_client.go new file mode 100644 index 0000000000000..fe0126d29bfbf --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/user_client.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + http "net/http" + + userv1 "github.com/openshift/api/user/v1" + scheme "github.com/openshift/client-go/user/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type UserV1Interface interface { + RESTClient() rest.Interface + GroupsGetter + IdentitiesGetter + UsersGetter + UserIdentityMappingsGetter +} + +// UserV1Client is used to interact with features provided by the user.openshift.io group. +type UserV1Client struct { + restClient rest.Interface +} + +func (c *UserV1Client) Groups() GroupInterface { + return newGroups(c) +} + +func (c *UserV1Client) Identities() IdentityInterface { + return newIdentities(c) +} + +func (c *UserV1Client) Users() UserInterface { + return newUsers(c) +} + +func (c *UserV1Client) UserIdentityMappings() UserIdentityMappingInterface { + return newUserIdentityMappings(c) +} + +// NewForConfig creates a new UserV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*UserV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new UserV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*UserV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &UserV1Client{client}, nil +} + +// NewForConfigOrDie creates a new UserV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *UserV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new UserV1Client for the given RESTClient. +func New(c rest.Interface) *UserV1Client { + return &UserV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := userv1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *UserV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/useridentitymapping.go b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/useridentitymapping.go new file mode 100644 index 0000000000000..05882eb3eeec6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/useridentitymapping.go @@ -0,0 +1,45 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + userv1 "github.com/openshift/api/user/v1" + scheme "github.com/openshift/client-go/user/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" +) + +// UserIdentityMappingsGetter has a method to return a UserIdentityMappingInterface. +// A group's client should implement this interface. +type UserIdentityMappingsGetter interface { + UserIdentityMappings() UserIdentityMappingInterface +} + +// UserIdentityMappingInterface has methods to work with UserIdentityMapping resources. 
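+
+// A sketch of working with the mapping client below (illustrative only, not
+// part of the generated file; the identity name "ldap:alice" and the new user
+// name are placeholders, and the mapping's User field is assumed to be a
+// corev1.ObjectReference as defined in github.com/openshift/api/user/v1):
+//
+//	package main
+//
+//	import (
+//		"context"
+//
+//		userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
+//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+//	)
+//
+//	func remap(ctx context.Context, c userv1client.UserV1Interface) error {
+//		m, err := c.UserIdentityMappings().Get(ctx, "ldap:alice", metav1.GetOptions{})
+//		if err != nil {
+//			return err
+//		}
+//		m.User.Name = "alice-renamed"
+//		_, err = c.UserIdentityMappings().Update(ctx, m, metav1.UpdateOptions{})
+//		return err
+//	}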
+type UserIdentityMappingInterface interface { + Create(ctx context.Context, userIdentityMapping *userv1.UserIdentityMapping, opts metav1.CreateOptions) (*userv1.UserIdentityMapping, error) + Update(ctx context.Context, userIdentityMapping *userv1.UserIdentityMapping, opts metav1.UpdateOptions) (*userv1.UserIdentityMapping, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*userv1.UserIdentityMapping, error) + UserIdentityMappingExpansion +} + +// userIdentityMappings implements UserIdentityMappingInterface +type userIdentityMappings struct { + *gentype.Client[*userv1.UserIdentityMapping] +} + +// newUserIdentityMappings returns a UserIdentityMappings +func newUserIdentityMappings(c *UserV1Client) *userIdentityMappings { + return &userIdentityMappings{ + gentype.NewClient[*userv1.UserIdentityMapping]( + "useridentitymappings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *userv1.UserIdentityMapping { return &userv1.UserIdentityMapping{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/factory.go new file mode 100644 index 0000000000000..952e6e95003a3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/user/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/user/informers/externalversions/internalinterfaces" + user "github.com/openshift/client-go/user/informers/externalversions/user" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. 
+	f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//		if !ok {
+//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//			return
+//		}
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+	User() user.Interface
+}
+
+func (f *sharedInformerFactory) User() user.Interface {
+	return user.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/generic.go
new file mode 100644
index 0000000000000..a706e1905811e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/generic.go
@@ -0,0 +1,50 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	fmt "fmt"
+
+	v1 "github.com/openshift/api/user/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type.
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=user.openshift.io, Version=v1
+	case v1.SchemeGroupVersion.WithResource("groups"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.User().V1().Groups().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("identities"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.User().V1().Identities().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("users"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.User().V1().Users().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000000..8bd5bff0ccf03
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,24 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/openshift/client-go/user/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
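+// Expanding the factory doc comment above into a self-contained sketch
+// (illustrative only, not part of the generated file; `cs` is a clientset
+// built as in the earlier example, and the lister's List(selector) method is
+// the one emitted by lister-gen):
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"time"
+//
+//		userclient "github.com/openshift/client-go/user/clientset/versioned"
+//		userinformers "github.com/openshift/client-go/user/informers/externalversions"
+//		"k8s.io/apimachinery/pkg/labels"
+//	)
+//
+//	func run(cs userclient.Interface) {
+//		ctx, cancel := context.WithCancel(context.Background())
+//		defer cancel()
+//
+//		factory := userinformers.NewSharedInformerFactory(cs, 10*time.Minute)
+//		defer factory.Shutdown() // blocks until informer goroutines exit
+//
+//		lister := factory.User().V1().Users().Lister()
+//
+//		factory.Start(ctx.Done())
+//		for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
+//			if !ok {
+//				panic("cache failed to sync: " + typ.String())
+//			}
+//		}
+//
+//		users, _ := lister.List(labels.Everything())
+//		_ = users
+//	}
+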
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle.
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/user/interface.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/interface.go
new file mode 100644
index 0000000000000..8ae8bae69500b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/interface.go
@@ -0,0 +1,30 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package user
+
+import (
+	internalinterfaces "github.com/openshift/client-go/user/informers/externalversions/internalinterfaces"
+	v1 "github.com/openshift/client-go/user/informers/externalversions/user/v1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+	// V1 provides access to shared informers for resources in V1.
+	V1() v1.Interface
+}
+
+type group struct {
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+	return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/group.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/group.go
new file mode 100644
index 0000000000000..69c542e8f5404
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/group.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	context "context"
+	time "time"
+
+	apiuserv1 "github.com/openshift/api/user/v1"
+	versioned "github.com/openshift/client-go/user/clientset/versioned"
+	internalinterfaces "github.com/openshift/client-go/user/informers/externalversions/internalinterfaces"
+	userv1 "github.com/openshift/client-go/user/listers/user/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	watch "k8s.io/apimachinery/pkg/watch"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GroupInformer provides access to a shared informer and lister for
+// Groups.
+type GroupInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() userv1.GroupLister
+}
+
+type groupInformer struct {
+	factory          internalinterfaces.SharedInformerFactory
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewGroupInformer constructs a new informer for Group type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewGroupInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredGroupInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredGroupInformer constructs a new informer for Group type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredGroupInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.UserV1().Groups().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.UserV1().Groups().Watch(context.TODO(), options) + }, + }, + &apiuserv1.Group{}, + resyncPeriod, + indexers, + ) +} + +func (f *groupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredGroupInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *groupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiuserv1.Group{}, f.defaultInformer) +} + +func (f *groupInformer) Lister() userv1.GroupLister { + return userv1.NewGroupLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/identity.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/identity.go new file mode 100644 index 0000000000000..b7d4263aea404 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/identity.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiuserv1 "github.com/openshift/api/user/v1" + versioned "github.com/openshift/client-go/user/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/user/informers/externalversions/internalinterfaces" + userv1 "github.com/openshift/client-go/user/listers/user/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// IdentityInformer provides access to a shared informer and lister for +// Identities. +type IdentityInformer interface { + Informer() cache.SharedIndexInformer + Lister() userv1.IdentityLister +} + +type identityInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIdentityInformer constructs a new informer for Identity type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
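+
+// A sketch of a standalone filtered Group informer built with the constructor
+// above (illustrative only, not part of the generated file; the label
+// selector is a placeholder, and a shared factory remains preferable as the
+// comment notes):
+//
+//	package main
+//
+//	import (
+//		"time"
+//
+//		userclient "github.com/openshift/client-go/user/clientset/versioned"
+//		userinformersv1 "github.com/openshift/client-go/user/informers/externalversions/user/v1"
+//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+//		"k8s.io/client-go/tools/cache"
+//	)
+//
+//	func newTeamGroupInformer(cs userclient.Interface) cache.SharedIndexInformer {
+//		return userinformersv1.NewFilteredGroupInformer(
+//			cs,
+//			30*time.Minute,
+//			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+//			func(opts *metav1.ListOptions) { opts.LabelSelector = "team=payments" },
+//		)
+//	}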
+func NewIdentityInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredIdentityInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredIdentityInformer constructs a new informer for Identity type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredIdentityInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.UserV1().Identities().List(context.TODO(), options)
+			},
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.UserV1().Identities().Watch(context.TODO(), options)
+			},
+		},
+		&apiuserv1.Identity{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+func (f *identityInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredIdentityInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *identityInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&apiuserv1.Identity{}, f.defaultInformer)
+}
+
+func (f *identityInformer) Lister() userv1.IdentityLister {
+	return userv1.NewIdentityLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/interface.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/interface.go
new file mode 100644
index 0000000000000..267a1543ddef9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/interface.go
@@ -0,0 +1,43 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	internalinterfaces "github.com/openshift/client-go/user/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+	// Groups returns a GroupInformer.
+	Groups() GroupInformer
+	// Identities returns an IdentityInformer.
+	Identities() IdentityInformer
+	// Users returns a UserInformer.
+	Users() UserInformer
+}
+
+type version struct {
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Groups returns a GroupInformer.
+func (v *version) Groups() GroupInformer {
+	return &groupInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Identities returns an IdentityInformer.
+func (v *version) Identities() IdentityInformer {
+	return &identityInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Users returns a UserInformer.
+func (v *version) Users() UserInformer { + return &userInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/user.go b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/user.go new file mode 100644 index 0000000000000..1b680051fd8b8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/informers/externalversions/user/v1/user.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiuserv1 "github.com/openshift/api/user/v1" + versioned "github.com/openshift/client-go/user/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/user/informers/externalversions/internalinterfaces" + userv1 "github.com/openshift/client-go/user/listers/user/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// UserInformer provides access to a shared informer and lister for +// Users. +type UserInformer interface { + Informer() cache.SharedIndexInformer + Lister() userv1.UserLister +} + +type userInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewUserInformer constructs a new informer for User type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewUserInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredUserInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredUserInformer constructs a new informer for User type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
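+//
+// A small sketch of the tweakListOptions hook (illustrative only): it lets a
+// caller narrow what the underlying ListWatch requests from the server, for
+// example by a label selector (the label key here is hypothetical):
+//
+//	tweak := func(options *metav1.ListOptions) {
+//		options.LabelSelector = "example.com/team=infra"
+//	}
+//	informer := NewFilteredUserInformer(client, 10*time.Minute, cache.Indexers{}, tweak)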
+func NewFilteredUserInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.UserV1().Users().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.UserV1().Users().Watch(context.TODO(), options) + }, + }, + &apiuserv1.User{}, + resyncPeriod, + indexers, + ) +} + +func (f *userInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredUserInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *userInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiuserv1.User{}, f.defaultInformer) +} + +func (f *userInformer) Lister() userv1.UserLister { + return userv1.NewUserLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/user/listers/user/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/user/listers/user/v1/expansion_generated.go new file mode 100644 index 0000000000000..dcb8be2181276 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/listers/user/v1/expansion_generated.go @@ -0,0 +1,15 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// GroupListerExpansion allows custom methods to be added to +// GroupLister. +type GroupListerExpansion interface{} + +// IdentityListerExpansion allows custom methods to be added to +// IdentityLister. +type IdentityListerExpansion interface{} + +// UserListerExpansion allows custom methods to be added to +// UserLister. +type UserListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/user/listers/user/v1/group.go b/vendor/github.com/openshift/client-go/user/listers/user/v1/group.go new file mode 100644 index 0000000000000..90349c6037955 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/listers/user/v1/group.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + userv1 "github.com/openshift/api/user/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// GroupLister helps list Groups. +// All objects returned here must be treated as read-only. +type GroupLister interface { + // List lists all Groups in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*userv1.Group, err error) + // Get retrieves the Group from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*userv1.Group, error) + GroupListerExpansion +} + +// groupLister implements the GroupLister interface. +type groupLister struct { + listers.ResourceIndexer[*userv1.Group] +} + +// NewGroupLister returns a new GroupLister. 
+func NewGroupLister(indexer cache.Indexer) GroupLister { + return &groupLister{listers.New[*userv1.Group](indexer, userv1.Resource("group"))} +} diff --git a/vendor/github.com/openshift/client-go/user/listers/user/v1/identity.go b/vendor/github.com/openshift/client-go/user/listers/user/v1/identity.go new file mode 100644 index 0000000000000..5b87dc2f70e99 --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/listers/user/v1/identity.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + userv1 "github.com/openshift/api/user/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// IdentityLister helps list Identities. +// All objects returned here must be treated as read-only. +type IdentityLister interface { + // List lists all Identities in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*userv1.Identity, err error) + // Get retrieves the Identity from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*userv1.Identity, error) + IdentityListerExpansion +} + +// identityLister implements the IdentityLister interface. +type identityLister struct { + listers.ResourceIndexer[*userv1.Identity] +} + +// NewIdentityLister returns a new IdentityLister. +func NewIdentityLister(indexer cache.Indexer) IdentityLister { + return &identityLister{listers.New[*userv1.Identity](indexer, userv1.Resource("identity"))} +} diff --git a/vendor/github.com/openshift/client-go/user/listers/user/v1/user.go b/vendor/github.com/openshift/client-go/user/listers/user/v1/user.go new file mode 100644 index 0000000000000..d2e2a674f98ec --- /dev/null +++ b/vendor/github.com/openshift/client-go/user/listers/user/v1/user.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + userv1 "github.com/openshift/api/user/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// UserLister helps list Users. +// All objects returned here must be treated as read-only. +type UserLister interface { + // List lists all Users in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*userv1.User, err error) + // Get retrieves the User from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*userv1.User, error) + UserListerExpansion +} + +// userLister implements the UserLister interface. +type userLister struct { + listers.ResourceIndexer[*userv1.User] +} + +// NewUserLister returns a new UserLister. +func NewUserLister(indexer cache.Indexer) UserLister { + return &userLister{listers.New[*userv1.User](indexer, userv1.Resource("user"))} +} diff --git a/vendor/github.com/openshift/library-go/LICENSE b/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go new file mode 100644 index 0000000000000..4d1d76adfc2c1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go @@ -0,0 +1,25 @@ +package admissionregistrationtesting + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" +) + +func AdmissionRegistrationTest(registeredAdmission *admission.Plugins, orderedAdmissionPlugins []string, defaultOffPlugins sets.Set[string]) error { + errs := []error{} + registeredPlugins := sets.New(registeredAdmission.Registered()...) + orderedAdmissionPluginsSet := sets.New(orderedAdmissionPlugins...) + + // make sure that all orderedAdmissionPlugins are registered + if diff := orderedAdmissionPluginsSet.Difference(registeredPlugins); len(diff) > 0 { + errs = append(errs, fmt.Errorf("registered plugins missing admission plugins: %v", sets.List(diff))) + } + if diff := defaultOffPlugins.Difference(orderedAdmissionPluginsSet); len(diff) > 0 { + errs = append(errs, fmt.Errorf("ordered admission plugins missing defaultOff plugins: %v", sets.List(diff))) + } + + return errors.NewAggregate(errs) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go new file mode 100644 index 0000000000000..5b4dc1036b07a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go @@ -0,0 +1,30 @@ +package admissionrestconfig + +import ( + "k8s.io/apiserver/pkg/admission" + restclient "k8s.io/client-go/rest" +) + +func NewInitializer(restClientConfig restclient.Config) admission.PluginInitializer { + return &localInitializer{ + restClientConfig: restClientConfig, + } +} + +// WantsRESTClientConfig gives access to a RESTClientConfig. It's useful for doing unusual things with transports. +type WantsRESTClientConfig interface { + SetRESTClientConfig(restclient.Config) + admission.InitializationValidator +} + +type localInitializer struct { + restClientConfig restclient.Config +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsRESTClientConfig); ok { + wants.SetRESTClientConfig(i.restClientConfig) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go new file mode 100644 index 0000000000000..3b2d245540492 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go @@ -0,0 +1,22 @@ +package admissiontimeout + +import ( + "time" + + "k8s.io/apiserver/pkg/admission" +) + +// AdmissionTimeout provides a decorator that will fail an admission plugin after a certain amount of time +// +// DEPRECATED: use the context of the admission handler instead. 
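+//
+// Usage sketch (illustrative only; plugin is any admission.Interface and the
+// timeout value is arbitrary):
+//
+//	wrapped := AdmissionTimeout{Timeout: 13 * time.Second}.WithTimeout(plugin, "SomePlugin")
+//
+// The wrapped plugin returns an internal error if Admit/Validate does not
+// finish within the timeout; see timeoutadmission.go for the mechanics.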
+type AdmissionTimeout struct {
+	Timeout time.Duration
+}
+
+func (d AdmissionTimeout) WithTimeout(admissionPlugin admission.Interface, name string) admission.Interface {
+	return pluginHandlerWithTimeout{
+		name:            name,
+		admissionPlugin: admissionPlugin,
+		timeout:         d.Timeout,
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go
new file mode 100644
index 0000000000000..2f290641c1960
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go
@@ -0,0 +1,126 @@
+package admissiontimeout
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"runtime"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/admission"
+)
+
+type pluginHandlerWithTimeout struct {
+	name            string
+	admissionPlugin admission.Interface
+	timeout         time.Duration
+}
+
+var _ admission.ValidationInterface = &pluginHandlerWithTimeout{}
+var _ admission.MutationInterface = &pluginHandlerWithTimeout{}
+
+func (p pluginHandlerWithTimeout) Handles(operation admission.Operation) bool {
+	return p.admissionPlugin.Handles(operation)
+}
+
+func (p pluginHandlerWithTimeout) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
+	mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface)
+	if !ok {
+		return nil
+	}
+
+	type result struct {
+		admissionErr error
+		panicErr     error
+	}
+	// if a timeout occurs, we don't want the child goroutine to hang forever
+	resultCh := make(chan result, 1)
+	go func() {
+		r := result{}
+		// NOTE: panics don't cross goroutine boundaries, so we have to recover
+		// here. We can't call utilruntime.HandleCrash, because it re-panics by
+		// default and that would crash the apiserver.
+		// We also need to make sure that the panic is propagated to the caller.
+		// TODO: use the reusable panic handler once
+		// https://github.com/kubernetes/kubernetes/pull/115564 merges.
+		defer func() {
+			if err := recover(); err != nil {
+				r.panicErr = fmt.Errorf("admission panic'd: %v", stack(err))
+				utilruntime.HandleError(r.panicErr)
+			}
+			resultCh <- r
+		}()
+
+		r.admissionErr = mutatingHandler.Admit(ctx, a, o)
+	}()
+
+	select {
+	case r := <-resultCh:
+		if r.panicErr != nil {
+			// this panic will propagate to net/http
+			panic(r.panicErr.(interface{}))
+		}
+		return r.admissionErr
+	case <-time.After(p.timeout):
+		return errors.NewInternalError(fmt.Errorf("admission plugin %q failed to complete mutation in %v", p.name, p.timeout))
+	}
+}
+
+func (p pluginHandlerWithTimeout) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
+	validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface)
+	if !ok {
+		return nil
+	}
+
+	type result struct {
+		admissionErr error
+		panicErr     error
+	}
+	// if a timeout occurs, we don't want the child goroutine to hang forever
+	resultCh := make(chan result, 1)
+	go func() {
+		r := result{}
+		// NOTE: panics don't cross goroutine boundaries, so we have to recover
+		// here. We can't call utilruntime.HandleCrash, because it re-panics by
+		// default and that would crash the apiserver.
+		// We also need to make sure that the panic is propagated to the caller.
+		// TODO: use the reusable panic handler once
+		// https://github.com/kubernetes/kubernetes/pull/115564 merges.
+		defer func() {
+			if err := recover(); err != nil {
+				r.panicErr = fmt.Errorf("admission panic'd: %v", stack(err))
+				utilruntime.HandleError(r.panicErr)
+			}
+			resultCh <- r
+		}()
+
+		r.admissionErr = validatingHandler.Validate(ctx, a, o)
+	}()
+
+	select {
+	case r := <-resultCh:
+		if r.panicErr != nil {
+			// this panic will propagate to net/http
+			panic(r.panicErr.(interface{}))
+		}
+		return r.admissionErr
+	case <-time.After(p.timeout):
+		return errors.NewInternalError(fmt.Errorf("admission plugin %q failed to complete validation in %v", p.name, p.timeout))
+	}
+}
+
+func stack(recovered interface{}) interface{} {
+	// do not wrap the sentinel ErrAbortHandler panic value
+	if recovered == http.ErrAbortHandler {
+		return recovered
+	}
+
+	// Same as stdlib http server code. Manually allocate stack
+	// trace buffer size to prevent excessively large logs
+	const size = 64 << 10
+	buf := make([]byte, size)
+	buf = buf[:runtime.Stack(buf, false)]
+	return fmt.Sprintf("%v\n%s", recovered, buf)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go
new file mode 100644
index 0000000000000..611735cccbb8b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go
@@ -0,0 +1,35 @@
+package apiserverconfig
+
+import (
+	"net/http"
+	"strings"
+)
+
+// cacheExcludedPathPrefixes is small and simple until the handlers include the cache headers they need
+var cacheExcludedPathPrefixes = []string{
+	"/swagger-2.0.0.json",
+	"/swagger-2.0.0.pb-v1",
+	"/swagger-2.0.0.pb-v1.gz",
+	"/swagger.json",
+	"/swaggerapi",
+	"/openapi/",
+}
+
+// WithCacheControl sets the Cache-Control header to the specified value.
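+//
+// Usage sketch (illustrative only; the header value is an example): wrap a
+// handler chain so responses carry a default Cache-Control header unless a
+// handler already set one or the path is excluded above:
+//
+//	handler = WithCacheControl(handler, "no-cache, private")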
+func WithCacheControl(handler http.Handler, value string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if _, ok := w.Header()["Cache-Control"]; ok { + handler.ServeHTTP(w, req) + return + } + for _, prefix := range cacheExcludedPathPrefixes { + if strings.HasPrefix(req.URL.Path, prefix) { + handler.ServeHTTP(w, req) + return + } + } + + w.Header().Set("Cache-Control", value) + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go new file mode 100644 index 0000000000000..04062385b846d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go @@ -0,0 +1,38 @@ +package apiserverconfig + +import ( + "net/http" + + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + genericfilters "k8s.io/apiserver/pkg/server/filters" + + buildv1 "github.com/openshift/api/build/v1" + imagev1 "github.com/openshift/api/image/v1" +) + +var ( + longRunningVerbs = sets.NewString("watch", "proxy") + longRunningSubresources = sets.NewString("attach", "exec", "proxy", "log", "portforward") + kubeLongRunningFunc = genericfilters.BasicLongRunningRequestCheck(longRunningVerbs, longRunningSubresources) +) + +func IsLongRunningRequest(r *http.Request, requestInfo *apirequest.RequestInfo) bool { + if requestInfo == nil { + return false + } + + if requestInfo.APIGroup == buildv1.GroupName && + requestInfo.Resource == "buildconfigs" && + requestInfo.Subresource == "instantiatebinary" { + return true + } + if requestInfo.APIGroup == imagev1.GroupName && + requestInfo.Resource == "imagestreamimports" { + return true + } + if kubeLongRunningFunc(r, requestInfo) { + return true + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go new file mode 100644 index 0000000000000..4416955cf5fb1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go @@ -0,0 +1,129 @@ +package apiserverconfig + +import ( + "bytes" + "io" + "net/http" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/request" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +type personalSARRequestInfoResolver struct { + // infoFactory is used to determine info for the request + infoFactory apirequest.RequestInfoResolver +} + +func newPersonalSARRequestInfoResolver(infoFactory apirequest.RequestInfoResolver) apirequest.RequestInfoResolver { + return &personalSARRequestInfoResolver{ + infoFactory: infoFactory, + } +} + +func (a *personalSARRequestInfoResolver) NewRequestInfo(req *http.Request) (*request.RequestInfo, error) { + requestInfo, err := a.infoFactory.NewRequestInfo(req) + if err != nil { + return requestInfo, err + } + + // only match SAR and LSAR requests for personal review + switch { + case !requestInfo.IsResourceRequest: + return requestInfo, nil + + case len(requestInfo.APIGroup) != 0 && requestInfo.APIGroup != "authorization.openshift.io": + return requestInfo, nil + + case 
len(requestInfo.Subresource) != 0: + return requestInfo, nil + + case requestInfo.Verb != "create": + return requestInfo, nil + + case requestInfo.Resource != "subjectaccessreviews" && requestInfo.Resource != "localsubjectaccessreviews": + return requestInfo, nil + } + + // at this point we're probably running a SAR or LSAR. Decode the body and check. This is expensive. + isSelfSAR, err := isPersonalAccessReviewFromRequest(req, requestInfo) + if err != nil { + return nil, err + } + if !isSelfSAR { + return requestInfo, nil + } + + // if we do have a self-SAR, rewrite the requestInfo to indicate this is a selfsubjectaccessreviews.authorization.k8s.io request + requestInfo.APIGroup = "authorization.k8s.io" + requestInfo.Resource = "selfsubjectaccessreviews" + + return requestInfo, nil +} + +// isPersonalAccessReviewFromRequest this variant handles the case where we have an httpRequest +func isPersonalAccessReviewFromRequest(req *http.Request, requestInfo *request.RequestInfo) (bool, error) { + // TODO once we're integrated with the api installer, we should have direct access to the deserialized content + // for now, this only happens on subjectaccessreviews with a personal check, pay the double retrieve and decode cost + body, err := io.ReadAll(req.Body) + if err != nil { + return false, err + } + req.Body = io.NopCloser(bytes.NewBuffer(body)) + + defaultGVK := schema.GroupVersionKind{Version: requestInfo.APIVersion, Group: requestInfo.APIGroup} + switch requestInfo.Resource { + case "subjectaccessreviews": + defaultGVK.Kind = "SubjectAccessReview" + case "localsubjectaccessreviews": + defaultGVK.Kind = "LocalSubjectAccessReview" + } + + obj, _, err := sarCodecFactory.UniversalDeserializer().Decode(body, &defaultGVK, nil) + if err != nil { + return false, err + } + switch castObj := obj.(type) { + case *authorizationv1.SubjectAccessReview: + return IsPersonalAccessReviewFromSAR(castObj), nil + + case *authorizationv1.LocalSubjectAccessReview: + return isPersonalAccessReviewFromLocalSAR(castObj), nil + + default: + return false, nil + } +} + +// IsPersonalAccessReviewFromSAR this variant handles the case where we have an SAR +func IsPersonalAccessReviewFromSAR(sar *authorizationv1.SubjectAccessReview) bool { + if len(sar.User) == 0 && len(sar.GroupsSlice) == 0 { + return true + } + + return false +} + +// isPersonalAccessReviewFromLocalSAR this variant handles the case where we have a local SAR +func isPersonalAccessReviewFromLocalSAR(sar *authorizationv1.LocalSubjectAccessReview) bool { + if len(sar.User) == 0 && len(sar.GroupsSlice) == 0 { + return true + } + + return false +} + +var ( + sarScheme = runtime.NewScheme() + sarCodecFactory = serializer.NewCodecFactory(sarScheme) +) + +func init() { + utilruntime.Must(authorizationv1.Install(sarScheme)) + utilruntime.Must(authorizationv1.DeprecatedInstallWithoutGroup(sarScheme)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go new file mode 100644 index 0000000000000..7682302f89a64 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go @@ -0,0 +1,34 @@ +package apiserverconfig + +import ( + "net/http" + + apirequest "k8s.io/apiserver/pkg/endpoints/request" + + projectv1 "github.com/openshift/api/project/v1" +) + +type projectRequestInfoResolver struct { + // infoFactory is used to determine info for the 
request + infoFactory apirequest.RequestInfoResolver +} + +func newProjectRequestInfoResolver(infoFactory apirequest.RequestInfoResolver) apirequest.RequestInfoResolver { + return &projectRequestInfoResolver{ + infoFactory: infoFactory, + } +} + +func (a *projectRequestInfoResolver) NewRequestInfo(req *http.Request) (*apirequest.RequestInfo, error) { + requestInfo, err := a.infoFactory.NewRequestInfo(req) + if err != nil { + return requestInfo, err + } + + // if the resource is projects, we need to set the namespace to the value of the name. + if (len(requestInfo.APIGroup) == 0 || requestInfo.APIGroup == projectv1.GroupName) && requestInfo.Resource == "projects" && len(requestInfo.Name) > 0 { + requestInfo.Namespace = requestInfo.Name + } + + return requestInfo, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go new file mode 100644 index 0000000000000..d14647d55a87a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go @@ -0,0 +1,17 @@ +package apiserverconfig + +import ( + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +func OpenshiftRequestInfoResolver() apirequest.RequestInfoResolver { + // Default API request info factory + requestInfoFactory := &apirequest.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + personalSARRequestInfoResolver := newPersonalSARRequestInfoResolver(requestInfoFactory) + projectRequestInfoResolver := newProjectRequestInfoResolver(personalSARRequestInfoResolver) + return projectRequestInfoResolver +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go new file mode 100644 index 0000000000000..74c179e686556 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go @@ -0,0 +1,56 @@ +package authorizationutil + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" +) + +func BuildRBACSubjects(users, groups []string) []rbacv1.Subject { + subjects := []rbacv1.Subject{} + + for _, user := range users { + saNamespace, saName, err := serviceaccount.SplitUsername(user) + if err == nil { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: saNamespace, Name: saName}) + } else { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: user}) + } + } + + for _, group := range groups { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: group}) + } + + return subjects +} + +func RBACSubjectsToUsersAndGroups(subjects []rbacv1.Subject, defaultNamespace string) (users []string, groups []string) { + for _, subject := range subjects { + + switch { + case subject.APIGroup == rbacv1.GroupName && subject.Kind == rbacv1.GroupKind: + groups = append(groups, subject.Name) + case subject.APIGroup == rbacv1.GroupName && subject.Kind == rbacv1.UserKind: + users = append(users, subject.Name) + case subject.APIGroup == "" && subject.Kind == rbacv1.ServiceAccountKind: + // default the namespace to namespace we're working in if + // it's available. 
This allows rolebindings that reference + // SAs in the local namespace to avoid having to qualify + // them. + ns := defaultNamespace + if len(subject.Namespace) > 0 { + ns = subject.Namespace + } + if len(ns) > 0 { + name := serviceaccount.MakeUsername(ns, subject.Name) + users = append(users, name) + } else { + // maybe error? this fails safe at any rate + } + default: + // maybe error? This fails safe at any rate + } + } + + return users, groups +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go new file mode 100644 index 0000000000000..040d0f643485c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go @@ -0,0 +1,50 @@ +package authorizationutil + +import ( + "context" + "errors" + + authorizationv1 "k8s.io/api/authorization/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/user" + authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" +) + +// AddUserToSAR adds the requisite user information to a SubjectAccessReview. +// It returns the modified SubjectAccessReview. +func AddUserToSAR(user user.Info, sar *authorizationv1.SubjectAccessReview) *authorizationv1.SubjectAccessReview { + sar.Spec.User = user.GetName() + // reminiscent of the bad old days of C. Copies copy the min number of elements of both source and dest + sar.Spec.Groups = make([]string, len(user.GetGroups())) + copy(sar.Spec.Groups, user.GetGroups()) + sar.Spec.Extra = map[string]authorizationv1.ExtraValue{} + + for k, v := range user.GetExtra() { + sar.Spec.Extra[k] = authorizationv1.ExtraValue(v) + } + + return sar +} + +// Authorize verifies that a given user is permitted to carry out a given +// action. If this cannot be determined, or if the user is not permitted, an +// error is returned. 
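+//
+// Usage sketch (illustrative only; client is an assumed kubernetes.Interface
+// and requestor an assumed user.Info):
+//
+//	err := Authorize(client.AuthorizationV1().SubjectAccessReviews(), requestor,
+//		&authorizationv1.ResourceAttributes{Verb: "get", Resource: "pods", Namespace: "demo"})
+//	if err != nil {
+//		// forbidden, or the SubjectAccessReview could not be completed
+//	}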
+func Authorize(sarClient authorizationclient.SubjectAccessReviewInterface, user user.Info, resourceAttributes *authorizationv1.ResourceAttributes) error { + sar := AddUserToSAR(user, &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: resourceAttributes, + }, + }) + + resp, err := sarClient.Create(context.TODO(), sar, metav1.CreateOptions{}) + if err == nil && resp != nil && resp.Status.Allowed { + return nil + } + + if err == nil { + err = errors.New(resp.Status.Reason) + } + return kerrors.NewForbidden(schema.GroupResource{Group: resourceAttributes.Group, Resource: resourceAttributes.Resource}, resourceAttributes.Name, err) +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go b/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go new file mode 100644 index 0000000000000..a5cf8f99a5cf3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go @@ -0,0 +1,38 @@ +package hardcodedauthorizer + +import ( + "context" + + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +type metricsAuthorizer struct{} + +// GetUser() user.Info - checked +// GetVerb() string - checked +// IsReadOnly() bool - na +// GetNamespace() string - na +// GetResource() string - na +// GetSubresource() string - na +// GetName() string - na +// GetAPIGroup() string - na +// GetAPIVersion() string - na +// IsResourceRequest() bool - checked +// GetPath() string - checked +func (metricsAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { + if a.GetUser().GetName() != "system:serviceaccount:openshift-monitoring:prometheus-k8s" { + return authorizer.DecisionNoOpinion, "", nil + } + if !a.IsResourceRequest() && + a.GetVerb() == "get" && + a.GetPath() == "/metrics" { + return authorizer.DecisionAllow, "requesting metrics is allowed", nil + } + + return authorizer.DecisionNoOpinion, "", nil +} + +// NewHardCodedMetricsAuthorizer returns a hardcoded authorizer for checking metrics. 
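+//
+// A typical wiring sketch (illustrative only; union is
+// k8s.io/apiserver/pkg/authorization/union and delegatedAuthorizer is an
+// assumed authorizer.Authorizer): consult this authorizer before a delegated
+// one so scraping /metrics keeps working even when delegation is unavailable:
+//
+//	authz := union.New(NewHardCodedMetricsAuthorizer(), delegatedAuthorizer)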
+func NewHardCodedMetricsAuthorizer() *metricsAuthorizer {
	return new(metricsAuthorizer)
}
diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go
new file mode 100644
index 0000000000000..e9b7518f3ddca
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go
@@ -0,0 +1,86 @@
+package scopemetadata
+
+import (
+	"fmt"
+	"strings"
+)
+
+// role:<clusterrole name>:<namespace>, with an optional trailing ":!" to include escalating resources
+type ClusterRoleEvaluator struct{}
+
+var clusterRoleEvaluatorInstance = ClusterRoleEvaluator{}
+
+func (ClusterRoleEvaluator) Handles(scope string) bool {
+	return ClusterRoleEvaluatorHandles(scope)
+}
+
+func (e ClusterRoleEvaluator) Validate(scope string) error {
+	_, _, _, err := ClusterRoleEvaluatorParseScope(scope)
+	return err
+}
+
+func (e ClusterRoleEvaluator) Describe(scope string) (string, string, error) {
+	roleName, scopeNamespace, escalating, err := ClusterRoleEvaluatorParseScope(scope)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Anything you can do [in project "foo" | server-wide] that is also allowed by the "admin" role[, except access escalating resources like secrets]
+
+	scopePhrase := ""
+	if scopeNamespace == scopesAllNamespaces {
+		scopePhrase = "server-wide"
+	} else {
+		scopePhrase = fmt.Sprintf("in project %q", scopeNamespace)
+	}
+
+	warning := ""
+	escalatingPhrase := ""
+	if escalating {
+		warning = "Includes access to escalating resources like secrets"
+	} else {
+		escalatingPhrase = ", except access escalating resources like secrets"
+	}
+
+	description := fmt.Sprintf("Anything you can do %s that is also allowed by the %q role%s", scopePhrase, roleName, escalatingPhrase)
+
+	return description, warning, nil
+}
+
+func ClusterRoleEvaluatorHandles(scope string) bool {
+	return strings.HasPrefix(scope, clusterRoleIndicator)
+}
+
+// ClusterRoleEvaluatorParseScope parses the requested scope, determining the requested role name, namespace, and if
+// access to escalating objects is required. It will return an error if it doesn't parse cleanly
+func ClusterRoleEvaluatorParseScope(scope string) (string /*role name*/, string /*namespace*/, bool /*escalating*/, error) {
+	if !ClusterRoleEvaluatorHandles(scope) {
+		return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+	}
+	return parseClusterRoleScope(scope)
+}
+
+func parseClusterRoleScope(scope string) (string /*role name*/, string /*namespace*/, bool /*escalating*/, error) {
+	if !strings.HasPrefix(scope, clusterRoleIndicator) {
+		return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+	}
+	escalating := false
+	if strings.HasSuffix(scope, ":!") {
+		escalating = true
+		// clip that last segment before parsing the rest
+		scope = scope[:strings.LastIndex(scope, ":")]
+	}
+
+	tokens := strings.SplitN(scope, ":", 2)
+	if len(tokens) != 2 {
+		return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+	}
+
+	// namespaces can't have colons, but roles can. pick last.
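+	// e.g. scope "role:system:image-builder:myns" leaves tokens[1] as
+	// "system:image-builder:myns"; splitting on the last colon yields role
+	// name "system:image-builder" and namespace "myns".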
+ lastColonIndex := strings.LastIndex(tokens[1], ":") + if lastColonIndex <= 0 || lastColonIndex == (len(tokens[1])-1) { + return "", "", false, fmt.Errorf("bad format for scope %v", scope) + } + + return tokens[1][0:lastColonIndex], tokens[1][lastColonIndex+1:], escalating, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go new file mode 100644 index 0000000000000..65280256c8aa3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go @@ -0,0 +1,17 @@ +package scopemetadata + +// ScopeDescriber takes a scope and returns metadata about it +type ScopeDescriber interface { + // Handles returns true if this evaluator can evaluate this scope + Handles(scope string) bool + // Validate returns an error if the scope is malformed + Validate(scope string) error + // Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed + Describe(scope string) (description string, warning string, err error) +} + +// ScopeDescribers map prefixes to a function that handles that prefix +var ScopeDescribers = []ScopeDescriber{ + UserEvaluator{}, + ClusterRoleEvaluator{}, +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go new file mode 100644 index 0000000000000..586a7d787ae56 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go @@ -0,0 +1,68 @@ +package scopemetadata + +import ( + "fmt" +) + +// these must agree with the scope authorizer, but it's an API we cannot realistically change +const ( + scopesAllNamespaces = "*" + + userIndicator = "user:" + clusterRoleIndicator = "role:" + + UserInfo = userIndicator + "info" + UserAccessCheck = userIndicator + "check-access" + + // UserListScopedProjects gives explicit permission to see the projects that this token can see. + UserListScopedProjects = userIndicator + "list-scoped-projects" + + // UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems + // unrelated to openshift and to display projects for selection in a secondary UI. 
+	UserListAllProjects = userIndicator + "list-projects"
+
+	// userFull includes all permissions of the user
+	userFull = userIndicator + "full"
+)
+
+// user:<scope name>
+type UserEvaluator struct{}
+
+func (UserEvaluator) Handles(scope string) bool {
+	return UserEvaluatorHandles(scope)
+}
+
+func (e UserEvaluator) Validate(scope string) error {
+	if e.Handles(scope) {
+		return nil
+	}
+
+	return fmt.Errorf("unrecognized scope: %v", scope)
+}
+
+var defaultSupportedScopesMap = map[string]string{
+	UserInfo:               "Read-only access to your user information (including username, identities, and group membership)",
+	UserAccessCheck:        `Read-only access to view your privileges (for example, "can I create builds?")`,
+	UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`,
+	UserListAllProjects:    `Read-only access to list your projects and view their metadata (display name, description, etc.)`,
+	userFull:               `Full read/write access with all of your permissions`,
+}
+
+func (UserEvaluator) Describe(scope string) (string, string, error) {
+	switch scope {
+	case UserInfo, UserAccessCheck, UserListScopedProjects, UserListAllProjects:
+		return defaultSupportedScopesMap[scope], "", nil
+	case userFull:
+		return defaultSupportedScopesMap[scope], `Includes any access you have to escalating resources like secrets`, nil
+	default:
+		return "", "", fmt.Errorf("unrecognized scope: %v", scope)
+	}
+}
+
+func UserEvaluatorHandles(scope string) bool {
+	switch scope {
+	case userFull, UserInfo, UserAccessCheck, UserListScopedProjects, UserListAllProjects:
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go
new file mode 100644
index 0000000000000..59a7009b9ef5d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go
@@ -0,0 +1,152 @@
+package scopemetadata
+
+import (
+	"fmt"
+
+	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+
+	oauthv1 "github.com/openshift/api/oauth/v1"
+)
+
+func ValidateScopes(scopes []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(scopes) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath, "may not be empty"))
+	}
+
+	for i, scope := range scopes {
+		illegalCharacter := false
+		// https://tools.ietf.org/html/rfc6749#section-3.3 (full list of allowed chars is %x21 / %x23-5B / %x5D-7E)
+		// for those without an ASCII table, that's `!`, `#-[`, `]-~` inclusive.
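+		// e.g. a space (0x20), a double quote (0x22), or a backslash (0x5C)
+		// falls outside those ranges and is rejected below.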
+ for _, ch := range scope { + switch { + case ch == '!': + case ch >= '#' && ch <= '[': + case ch >= ']' && ch <= '~': + default: + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, fmt.Sprintf("%v not allowed", ch))) + illegalCharacter = true + } + } + if illegalCharacter { + continue + } + + found := false + for _, evaluator := range ScopeDescribers { + if !evaluator.Handles(scope) { + continue + } + + found = true + if err := evaluator.Validate(scope); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, err.Error())) + break + } + } + + if !found { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, "no scope handler found")) + } + } + + return allErrs +} + +func ValidateScopeRestrictions(client *oauthv1.OAuthClient, scopes ...string) error { + if len(scopes) == 0 { + return fmt.Errorf("%s may not request unscoped tokens", client.Name) + } + + if len(client.ScopeRestrictions) == 0 { + return nil + } + + errs := []error{} + for _, scope := range scopes { + if err := validateScopeRestrictions(client, scope); err != nil { + errs = append(errs, err) + } + } + + return kutilerrors.NewAggregate(errs) +} + +func validateScopeRestrictions(client *oauthv1.OAuthClient, scope string) error { + errs := []error{} + + for _, restriction := range client.ScopeRestrictions { + if len(restriction.ExactValues) > 0 { + if err := validateLiteralScopeRestrictions(scope, restriction.ExactValues); err != nil { + errs = append(errs, err) + continue + } + return nil + } + + if restriction.ClusterRole != nil { + if !ClusterRoleEvaluatorHandles(scope) { + continue + } + if err := validateClusterRoleScopeRestrictions(scope, *restriction.ClusterRole); err != nil { + errs = append(errs, err) + continue + } + return nil + } + } + + // if we got here, then nothing matched. If we already have errors, do nothing, otherwise add one to make it report failed. 
+ if len(errs) == 0 { + errs = append(errs, fmt.Errorf("%v did not match any scope restriction", scope)) + } + + return kutilerrors.NewAggregate(errs) +} + +func validateLiteralScopeRestrictions(scope string, literals []string) error { + for _, literal := range literals { + if literal == scope { + return nil + } + } + + return fmt.Errorf("%v not found in %v", scope, literals) +} + +func validateClusterRoleScopeRestrictions(scope string, restriction oauthv1.ClusterRoleScopeRestriction) error { + role, namespace, escalating, err := ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return err + } + + foundName := false + for _, restrictedRoleName := range restriction.RoleNames { + if restrictedRoleName == "*" || restrictedRoleName == role { + foundName = true + break + } + } + if !foundName { + return fmt.Errorf("%v does not use an approved name", scope) + } + + foundNamespace := false + for _, restrictedNamespace := range restriction.Namespaces { + if restrictedNamespace == "*" || restrictedNamespace == namespace { + foundNamespace = true + break + } + } + if !foundNamespace { + return fmt.Errorf("%v does not use an approved namespace", scope) + } + + if escalating && !restriction.AllowEscalation { + return fmt.Errorf("%v is not allowed to escalate", scope) + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go b/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go new file mode 100644 index 0000000000000..a440a04a1b2d5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go @@ -0,0 +1,229 @@ +package openshiftrestmapper + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// defaultRESTMappings contains enough RESTMappings to have enough of the kube-controller-manager succeed when running +// against a kube-apiserver that cannot reach aggregated APIs to do a full mapping. This happens when the OwnerReferencesPermissionEnforcement +// admission plugin runs to confirm permissions. Don't add things just because you don't want to fail. These are here so that +// we can start enough back up to get the rest of the system working correctly. 
+var defaultRESTMappings = []meta.RESTMapping{
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"},
+	},
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"},
+		Scope:            meta.RESTScopeNamespace,
+		Resource:         schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"},
+	},
+	// This is created so that cluster-bootstrap can always map securitycontextconstraints since the CRD doesn't have
+	// discovery. Discovery is delegated to the openshift-apiserver, which does not exist early in the bootstrapping
+	// phase. This leads to SCC-related failures that we don't need to have.
+	{
+		GroupVersionKind: schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "SecurityContextConstraints"},
+		Scope:            meta.RESTScopeRoot,
+		Resource:         schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"},
+	},
+	// This is created so that cluster-bootstrap can always map customresourcedefinitions, RBAC, and machine resources so that CRDs and
+	// permissions are always created quickly. We observed discovery not including these on AWS OVN installations, and
+	// the lack of CRDs and permissions blocked additional aspects of cluster bootstrapping.
+ { + GroupVersionKind: schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "Machine"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machines"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "MachineSet"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machinesets"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "machineconfiguration.openshift.io", Version: "v1", Kind: "MachineConfig"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "machineconfiguration.openshift.io", Version: "v1", Resource: "machineconfigs"}, + }, + // This is here so cluster-bootstrap can always create the config instances that are used to drive our operators to avoid the + // excessive bootstrap wait that prevents installer from completing on AWS OVN + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "DNS"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "dnses"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Infrastructure"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "infrastructures"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Network"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "networks"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Ingress"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "ingresses"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", 
Kind: "Proxy"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "proxies"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Scheduler"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "schedulers"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ClusterVersion"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "clusterversions"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "CloudCredential"}, + Scope: meta.RESTScopeRoot, + Resource: schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "cloudcredentials"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ServiceMonitor"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}, + }, +} + +func NewOpenShiftHardcodedRESTMapper(delegate meta.RESTMapper) meta.RESTMapper { + ret := HardCodedFirstRESTMapper{ + Mapping: map[schema.GroupVersionKind]meta.RESTMapping{}, + RESTMapper: delegate, + } + for i := range defaultRESTMappings { + curr := defaultRESTMappings[i] + ret.Mapping[curr.GroupVersionKind] = curr + } + return ret +} + +// HardCodedFirstRESTMapper is a RESTMapper that will look for hardcoded mappings first, then delegate. +// This is done in service to `OwnerReferencesPermissionEnforcement` and for cluster-bootstrap. +type HardCodedFirstRESTMapper struct { + Mapping map[schema.GroupVersionKind]meta.RESTMapping + meta.RESTMapper +} + +var _ meta.RESTMapper = HardCodedFirstRESTMapper{} + +func (m HardCodedFirstRESTMapper) String() string { + return fmt.Sprintf("HardCodedRESTMapper{\n\t%v\n%v\n}", m.Mapping, m.RESTMapper) +} + +// RESTMapping is the only function called today. The first hit openshiftrestmapper ought to make this work right. OwnerReferencesPermissionEnforcement +// only ever calls with one version. +func (m HardCodedFirstRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + // not exactly one version, delegate + if len(versions) != 1 { + return m.RESTMapper.RESTMapping(gk, versions...) + } + gvk := gk.WithVersion(versions[0]) + + single, ok := m.Mapping[gvk] + // not handled, delegate + if !ok { + return m.RESTMapper.RESTMapping(gk, versions...) + } + + return &single, nil +} + +// RESTMapping is the only function called today. The firsthit openshiftrestmapper ought to make this work right. OwnerReferencesPermissionEnforcement +// only ever calls with one version. +func (m HardCodedFirstRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + // not exactly one version, delegate + if len(versions) != 1 { + return m.RESTMapper.RESTMappings(gk, versions...) + } + gvk := gk.WithVersion(versions[0]) + + single, ok := m.Mapping[gvk] + // not handled, delegate + if !ok { + return m.RESTMapper.RESTMappings(gk, versions...) 
+ } + + return []*meta.RESTMapping{&single}, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go new file mode 100644 index 0000000000000..f6327946048ad --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go @@ -0,0 +1,127 @@ +package client + +import ( + "net/http" + "os" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/network" +) + +// GetKubeConfigOrInClusterConfig loads in-cluster config if kubeConfigFile is empty or the file if not, +// then applies overrides. +func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides *ClientConnectionOverrides) (*rest.Config, error) { + if len(kubeConfigFile) > 0 { + return GetClientConfig(kubeConfigFile, overrides) + } + + clientConfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + applyClientConnectionOverrides(overrides, clientConfig) + + t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport} + if overrides != nil { + t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost + } + clientConfig.WrapTransport = t.DefaultClientTransport + + return clientConfig, nil +} + +// GetClientConfig returns the rest.Config for a kubeconfig file +func GetClientConfig(kubeConfigFile string, overrides *ClientConnectionOverrides) (*rest.Config, error) { + kubeConfigBytes, err := os.ReadFile(kubeConfigFile) + if err != nil { + return nil, err + } + kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) + if err != nil { + return nil, err + } + clientConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + + t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport} + if overrides != nil { + t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost + } + clientConfig.WrapTransport = t.DefaultClientTransport + + return clientConfig, nil +} + +// applyClientConnectionOverrides updates a kubeConfig with the overrides from the config. +func applyClientConnectionOverrides(overrides *ClientConnectionOverrides, kubeConfig *rest.Config) { + if overrides == nil { + return + } + if overrides.QPS > 0 { + kubeConfig.QPS = overrides.QPS + } + if overrides.Burst > 0 { + kubeConfig.Burst = int(overrides.Burst) + } + if len(overrides.AcceptContentTypes) > 0 { + kubeConfig.ContentConfig.AcceptContentTypes = overrides.AcceptContentTypes + } + if len(overrides.ContentType) > 0 { + kubeConfig.ContentConfig.ContentType = overrides.ContentType + } + + // TODO both of these default values look wrong + // if we have no preferences at this point, claim that we accept both proto and json. We will get proto if the server supports it. + // this is a slightly niggly thing. If the server has proto and our client does not (possible, but not super likely) then this fails. 
+	if len(kubeConfig.ContentConfig.AcceptContentTypes) == 0 {
+		kubeConfig.ContentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+	}
+	if len(kubeConfig.ContentConfig.ContentType) == 0 {
+		kubeConfig.ContentConfig.ContentType = "application/vnd.kubernetes.protobuf"
+	}
+}
+
+type ClientTransportOverrides struct {
+	WrapTransport       func(rt http.RoundTripper) http.RoundTripper
+	MaxIdleConnsPerHost int
+}
+
+// DefaultClientTransport sets defaults for a client Transport that are suitable for use by infrastructure components.
+func (c ClientTransportOverrides) DefaultClientTransport(rt http.RoundTripper) http.RoundTripper {
+	transport, ok := rt.(*http.Transport)
+	if !ok {
+		return rt
+	}
+
+	transport.DialContext = network.DefaultClientDialContext()
+
+	// Hold open more internal idle connections
+	transport.MaxIdleConnsPerHost = 100
+	if c.MaxIdleConnsPerHost > 0 {
+		transport.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost
+	}
+
+	if c.WrapTransport == nil {
+		return transport
+	}
+	return c.WrapTransport(transport)
+}
+
+// ClientConnectionOverrides allows overriding values for rest.Config not held in a kubeconfig. Most commonly used
+// for QPS. Empty values are not used.
+type ClientConnectionOverrides struct {
+	configv1.ClientConnectionOverrides
+
+	// MaxIdleConnsPerHost, if non-zero, controls the maximum idle (keep-alive) connections to keep per-host:port.
+	// If zero, DefaultMaxIdleConnsPerHost is used.
+	// TODO roll this into the connection overrides in api
+	MaxIdleConnsPerHost int
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/config/client/transport.go b/vendor/github.com/openshift/library-go/pkg/config/client/transport.go
new file mode 100644
index 0000000000000..dfecd576621a8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/config/client/transport.go
@@ -0,0 +1,92 @@
+package client
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+
+	"k8s.io/client-go/rest"
+)
+
+// AnonymousClientConfigWithWrapTransport returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (Transport) removed.
+// This function preserves WrapTransport for clients that care about custom HTTP behavior.
+func AnonymousClientConfigWithWrapTransport(config *rest.Config) *rest.Config {
+	newConfig := rest.AnonymousClientConfig(config)
+	newConfig.WrapTransport = config.WrapTransport
+	return newConfig
+}
+
+// DefaultServerName extracts the hostname from config.Host and sets it in config.ServerName.
+// The ServerName is passed to the server for SNI and is used in the client to check server certificates.
+//
+// note:
+// if the ServerName has already been specified, calling this method has no effect
+func DefaultServerName(config *rest.Config) error {
+	if len(config.ServerName) > 0 {
+		return nil
+	}
+	u, err := url.Parse(config.Host)
+	if err != nil {
+		return err
+	}
+	host, _, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		// assume u.Host contains only host portion
+		config.ServerName = u.Host
+		return nil
+	}
+	config.ServerName = host
+	return nil
+}
+
+// NewPreferredHostRoundTripper returns a simple middleware that changes the destination host of each request to the
+// provided one. If the preferred host doesn't exist (is an empty string), this RT has no effect.
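A hedged usage sketch for the round tripper documented above, before its implementation: it assumes `rest.Config.Wrap` from client-go, and the fixed endpoint and resolver are hypothetical.

```go
package main

import (
	"k8s.io/client-go/rest"

	"github.com/openshift/library-go/pkg/config/client"
)

func main() {
	cfg := &rest.Config{Host: "https://kubernetes.default.svc"} // hypothetical config

	// Hypothetical resolver: always prefer a fixed apiserver endpoint.
	preferred := func() string { return "localhost:6443" }

	// rest.Config.Wrap chains this RT onto any transport wrapping already
	// configured, matching the factory's func(http.RoundTripper) http.RoundTripper signature.
	cfg.Wrap(client.NewPreferredHostRoundTripper(preferred))
}
```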
+func NewPreferredHostRoundTripper(preferredHostFn func() string) func(http.RoundTripper) http.RoundTripper {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &preferredHostRT{baseRT: rt, preferredHostFn: preferredHostFn}
+	}
+}
+
+type preferredHostRT struct {
+	baseRT          http.RoundTripper
+	preferredHostFn func() string
+}
+
+func (rt *preferredHostRT) RoundTrip(r *http.Request) (*http.Response, error) {
+	preferredHost := rt.preferredHostFn()
+
+	if len(preferredHost) == 0 {
+		return rt.baseRT.RoundTrip(r)
+	}
+
+	r.Host = preferredHost
+	r.URL.Host = preferredHost
+	return rt.baseRT.RoundTrip(r)
+}
+
+// CancelRequest exists to facilitate cancellation.
+//
+// In general there are at least three ways of cancelling a request by an HTTP client:
+// 1. Transport.CancelRequest (deprecated)
+// 2. Request.Cancel
+// 3. Request.Context (preferred)
+//
+// When using client-go, callers can specify a timeout value that is passed directly to an http.Client.
+// The HTTP client cancels requests to the underlying Transport as if the Request's Context ended.
+// For compatibility, the Client will also use the deprecated CancelRequest method on Transport if found.
+// New RoundTripper implementations should use the Request's Context for cancellation instead of implementing CancelRequest.
+//
+// Because this wrapper might be first in the chain, or might itself be wrapped by existing wrappers that implement
+// CancelRequest, we simply need to conform.
+//
+// See for more details:
+//
+//	https://github.com/kubernetes/kubernetes/blob/442a69c3bdf6fe8e525b05887e57d89db1e2f3a5/staging/src/k8s.io/client-go/transport/transport.go#L257
+//	https://github.com/kubernetes/kubernetes/blob/e29c568c4a9cd45d15665345aa015e21bcff52dd/staging/src/k8s.io/client-go/rest/config.go#L328
+//	https://github.com/kubernetes/kubernetes/blob/3b2746c9ea9e0fa247b01dca27634e509b385eda/staging/src/k8s.io/client-go/transport/round_trippers.go#L302
+func (rt *preferredHostRT) CancelRequest(req *http.Request) {
+	type canceler interface{ CancelRequest(*http.Request) }
+
+	if rtCanceller, ok := rt.baseRT.(canceler); ok {
+		rtCanceller.CancelRequest(req)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go b/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go
new file mode 100644
index 0000000000000..7d3f44caf2a8e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go
@@ -0,0 +1,81 @@
+package configdefaults
+
+import (
+	"time"
+
+	configv1 "github.com/openshift/api/config/v1"
+	"github.com/openshift/library-go/pkg/crypto"
+)
+
+func DefaultString(target *string, defaultVal string) {
+	if len(*target) == 0 {
+		*target = defaultVal
+	}
+}
+
+func DefaultInt(target *int, defaultVal int) {
+	if *target == 0 {
+		*target = defaultVal
+	}
+}
+
+func DefaultMetaDuration(target *time.Duration, defaultVal time.Duration) {
+	if *target == 0 {
+		*target = defaultVal
+	}
+}
+
+func DefaultStringSlice(target *[]string, defaultVal []string) {
+	if len(*target) == 0 {
+		*target = defaultVal
+	}
+}
+
+func SetRecommendedHTTPServingInfoDefaults(config *configv1.HTTPServingInfo) {
+	if config.MaxRequestsInFlight == 0 {
+		config.MaxRequestsInFlight = 3000
+	}
+	if config.RequestTimeoutSeconds == 0 {
+		config.RequestTimeoutSeconds = 60 * 60 // one hour
+	}
+
+	SetRecommendedServingInfoDefaults(&config.ServingInfo)
+}
+
+func SetRecommendedServingInfoDefaults(config *configv1.ServingInfo) {
+	
DefaultString(&config.BindAddress, "0.0.0.0:8443") + DefaultString(&config.BindNetwork, "tcp") + DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/serving-cert/tls.key") + DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/serving-cert/tls.crt") + DefaultString(&config.ClientCA, "/var/run/configmaps/client-ca/ca-bundle.crt") + DefaultString(&config.MinTLSVersion, crypto.TLSVersionToNameOrDie(crypto.DefaultTLSVersion())) + + if len(config.CipherSuites) == 0 { + config.CipherSuites = crypto.CipherSuitesToNamesOrDie(crypto.DefaultCiphers()) + } +} + +func SetRecommendedGenericAPIServerConfigDefaults(config *configv1.GenericAPIServerConfig) { + SetRecommendedHTTPServingInfoDefaults(&config.ServingInfo) + SetRecommendedEtcdConnectionInfoDefaults(&config.StorageConfig.EtcdConnectionInfo) + SetRecommendedKubeClientConfigDefaults(&config.KubeClientConfig) +} + +func SetRecommendedEtcdConnectionInfoDefaults(config *configv1.EtcdConnectionInfo) { + DefaultStringSlice(&config.URLs, []string{"https://etcd.kube-system.svc:2379"}) + DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/etcd-client/tls.key") + DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/etcd-client/tls.crt") + DefaultString(&config.CA, "/var/run/configmaps/etcd-serving-ca/ca-bundle.crt") +} + +func SetRecommendedKubeClientConfigDefaults(config *configv1.KubeClientConfig) { + // these are historical values + if config.ConnectionOverrides.QPS <= 0 { + config.ConnectionOverrides.QPS = 150.0 + } + if config.ConnectionOverrides.Burst <= 0 { + config.ConnectionOverrides.Burst = 300 + } + DefaultString(&config.ConnectionOverrides.AcceptContentTypes, "application/vnd.kubernetes.protobuf,application/json") + DefaultString(&config.ConnectionOverrides.ContentType, "application/vnd.kubernetes.protobuf") +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go new file mode 100644 index 0000000000000..98d4f8f892422 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go @@ -0,0 +1,71 @@ +package helpers + +import ( + "os" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/client" +) + +// TODO this file needs to collapse with pkg/config/client. We cannot safely delegate from this file because this one +// TODO uses JSON and other uses protobuf. + +// GetKubeClientConfig loads in-cluster config if kubeConfigFile is empty or the file if not, then applies overrides. +func GetKubeClientConfig(kubeClientConnection configv1.KubeClientConfig) (*rest.Config, error) { + return GetKubeConfigOrInClusterConfig(kubeClientConnection.KubeConfig, kubeClientConnection.ConnectionOverrides) +} + +// GetKubeConfigOrInClusterConfig loads in-cluster config if kubeConfigFile is empty or the file if not, +// then applies overrides. 
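A minimal sketch of calling the helper documented above, assuming the `configv1.ClientConnectionOverrides` fields shown elsewhere in this diff; the QPS and Burst values are illustrative, and an empty path falls back to in-cluster config.

```go
package main

import (
	"fmt"
	"os"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/helpers"
)

func main() {
	// Illustrative overrides; zero values would be ignored by the helper.
	overrides := configv1.ClientConnectionOverrides{QPS: 50, Burst: 100}

	// Empty KUBECONFIG means "fall back to the in-cluster service account config".
	cfg, err := helpers.GetKubeConfigOrInClusterConfig(os.Getenv("KUBECONFIG"), overrides)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("client QPS:", cfg.QPS)
}
```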
+func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides configv1.ClientConnectionOverrides) (*rest.Config, error) { + if len(kubeConfigFile) > 0 { + return GetClientConfig(kubeConfigFile, overrides) + } + + clientConfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport + + return clientConfig, nil +} + +func GetClientConfig(kubeConfigFile string, overrides configv1.ClientConnectionOverrides) (*rest.Config, error) { + kubeConfigBytes, err := os.ReadFile(kubeConfigFile) + if err != nil { + return nil, err + } + kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) + if err != nil { + return nil, err + } + clientConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport + + return clientConfig, nil +} + +// applyClientConnectionOverrides updates a kubeConfig with the overrides from the config. +func applyClientConnectionOverrides(overrides configv1.ClientConnectionOverrides, kubeConfig *rest.Config) { + if overrides.QPS != 0 { + kubeConfig.QPS = overrides.QPS + } + if overrides.Burst != 0 { + kubeConfig.Burst = int(overrides.Burst) + } + if len(overrides.AcceptContentTypes) != 0 { + kubeConfig.ContentConfig.AcceptContentTypes = overrides.AcceptContentTypes + } + if len(overrides.ContentType) != 0 { + kubeConfig.ContentConfig.ContentType = overrides.ContentType + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go new file mode 100644 index 0000000000000..21d4d24f17366 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go @@ -0,0 +1,145 @@ +package helpers + +import ( + "strings" + + configv1 "github.com/openshift/api/config/v1" +) + +func GetHTTPServingInfoFileReferences(config *configv1.HTTPServingInfo) []*string { + if config == nil { + return []*string{} + } + + return GetServingInfoFileReferences(&config.ServingInfo) +} + +func GetServingInfoFileReferences(config *configv1.ServingInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.ClientCA) + for i := range config.NamedCertificates { + refs = append(refs, &config.NamedCertificates[i].CertFile) + refs = append(refs, &config.NamedCertificates[i].KeyFile) + } + + return refs +} + +func GetCertFileReferences(config *configv1.CertInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.CertFile) + refs = append(refs, &config.KeyFile) + return refs +} + +func GetRemoteConnectionInfoFileReferences(config *configv1.RemoteConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetEtcdConnectionInfoFileReferences(config *configv1.EtcdConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) 
+	refs = append(refs, &config.CA)
+	return refs
+}
+
+func GetStringSourceFileReferences(s *configv1.StringSource) []*string {
+	if s == nil {
+		return []*string{}
+	}
+
+	return []*string{
+		&s.File,
+		&s.KeyFile,
+	}
+}
+
+func GetAdmissionPluginConfigFileReferences(config *configv1.AdmissionPluginConfig) []*string {
+	if config == nil {
+		return []*string{}
+	}
+
+	refs := []*string{}
+	refs = append(refs, &config.Location)
+	return refs
+}
+
+func GetAuditConfigFileReferences(config *configv1.AuditConfig) []*string {
+	if config == nil {
+		return []*string{}
+	}
+
+	refs := []*string{}
+	refs = append(refs, &config.PolicyFile)
+	refs = append(refs, &config.AuditFilePath)
+	return refs
+}
+
+func GetKubeClientConfigFileReferences(config *configv1.KubeClientConfig) []*string {
+	if config == nil {
+		return []*string{}
+	}
+
+	refs := []*string{}
+	refs = append(refs, &config.KubeConfig)
+	return refs
+}
+
+func GetGenericAPIServerConfigFileReferences(config *configv1.GenericAPIServerConfig) []*string {
+	if config == nil {
+		return []*string{}
+	}
+
+	refs := []*string{}
+	refs = append(refs, GetHTTPServingInfoFileReferences(&config.ServingInfo)...)
+	refs = append(refs, GetEtcdConnectionInfoFileReferences(&config.StorageConfig.EtcdConnectionInfo)...)
+	refs = append(refs, GetAuditConfigFileReferences(&config.AuditConfig)...)
+	refs = append(refs, GetKubeClientConfigFileReferences(&config.KubeClientConfig)...)
+
+	// TODO admission config file resolution is currently broken.
+	//for k := range config.AdmissionPluginConfig {
+	//	refs = append(refs, GetAdmissionPluginConfigReferences(&(config.AdmissionPluginConfig[k]))...)
+	//}
+	return refs
+}
+
+func GetFlagsWithFileExtensionsFileReferences(args map[string][]string) []*string {
+	if args == nil {
+		return []*string{}
+	}
+
+	refs := []*string{}
+	for key, s := range args {
+		if len(s) == 0 {
+			continue
+		}
+		if !strings.HasSuffix(key, "-file") && !strings.HasSuffix(key, "-dir") {
+			continue
+		}
+		for i := range s {
+			refs = append(refs, &s[i])
+		}
+	}
+
+	return refs
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go
new file mode 100644
index 0000000000000..fa7e4b46510be
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go
@@ -0,0 +1,64 @@
+package helpers
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory.
+// Empty and "-" paths are never resolved.
+func ResolvePaths(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't resolve empty paths, or "-"
+		if len(*ref) > 0 && *ref != "-" {
+			// Don't resolve absolute paths
+			if !filepath.IsAbs(*ref) {
+				*ref = filepath.Join(base, *ref)
+			}
+		}
+	}
+	return nil
+}
+
+func makeRelative(path, base string) (string, error) {
+	if len(path) > 0 && path != "-" {
+		rel, err := filepath.Rel(base, path)
+		if err != nil {
+			return path, err
+		}
+		return rel, nil
+	}
+	return path, nil
+}
+
+// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps.
+// Any path requiring a backstep is left as-is, as long as it is absolute. Any non-absolute path that can't be relativized produces an error.
+// Empty and "-" paths are never relativized.
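A small sketch, with hypothetical paths, of the backstep rules the comment above describes; it uses only the `helpers` functions defined in this file.

```go
package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/config/helpers"
)

func main() {
	// Hypothetical file references as they might appear in a loaded config.
	cert := "/etc/origin/serving-cert/tls.crt" // relativizable against the base
	audit := "/var/log/audit.log"              // needs backsteps; left as-is because it is absolute

	if err := helpers.RelativizePathWithNoBacksteps([]*string{&cert, &audit}, "/etc/origin"); err != nil {
		panic(err)
	}
	fmt.Println(cert)  // serving-cert/tls.crt
	fmt.Println(audit) // /var/log/audit.log (unchanged)
}
```

A non-absolute path that needed backsteps would instead produce the error, which is the safe failure mode for references that cannot be reconstructed later.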
+func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths, or "-" + if len(*ref) > 0 && *ref != "-" { + rel, err := makeRelative(*ref, base) + if err != nil { + return err + } + + if rel == "-" { + rel = "./-" + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go new file mode 100644 index 0000000000000..966cc527870f0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go @@ -0,0 +1,166 @@ +package helpers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + kyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog/v2" + "sigs.k8s.io/yaml" +) + +// InstallFunc is the "normal" function for installing scheme +type InstallFunc func(scheme *runtime.Scheme) error + +// ReadYAMLToInternal reads content of a reader and returns the runtime.Object that matches it. It chooses the match from +// the scheme installation that you provide. It converts to internal for you. +func ReadYAMLToInternal(reader io.Reader, schemeFns ...InstallFunc) (runtime.Object, error) { + if reader == nil || reflect.ValueOf(reader).IsNil() { + return nil, nil + } + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + jsonData, err := kyaml.ToJSON(data) + if err != nil { + // maybe we were already json + jsonData = data + } + + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) + + obj, err := runtime.Decode(codec, jsonData) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", jsonData, err) + } + // make sure there are no extra fields in jsonData + if err := strictDecodeCheck(jsonData, obj, scheme); err != nil { + return nil, err + } + + return obj, nil +} + +// ReadYAML reads content of a reader and returns the runtime.Object that matches it. It chooses the match from +// the scheme installation that you provide. It does not convert and it does not default. 
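A hedged sketch of feeding this reader an InstallFunc, placed before the non-converting variant that the comment above introduces. It assumes `configv1.Install` from openshift/api satisfies the `InstallFunc` signature; the YAML document is illustrative.

```go
package main

import (
	"fmt"
	"strings"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/helpers"
)

func main() {
	yamlDoc := strings.NewReader(`
apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster
`)
	// configv1.Install is assumed to match InstallFunc(scheme *runtime.Scheme) error.
	obj, err := helpers.ReadYAML(yamlDoc, configv1.Install)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // the registered external type, e.g. *v1.Image
}
```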
+func ReadYAML(reader io.Reader, schemeFns ...InstallFunc) (runtime.Object, error) { + if reader == nil || reflect.ValueOf(reader).IsNil() { + return nil, nil + } + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + jsonData, err := kyaml.ToJSON(data) + if err != nil { + // maybe we were already json + jsonData = data + } + + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).UniversalDeserializer() + + obj, err := runtime.Decode(codec, jsonData) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", jsonData, err) + } + // make sure there are no extra fields in jsonData + if err := strictDecodeCheck(jsonData, obj, scheme); err != nil { + return nil, err + } + + return obj, nil +} + +// TODO: we ultimately want a better decoder for JSON that allows us exact line numbers and better +// surrounding text description. This should be removed / replaced when that happens. +func captureSurroundingJSONForError(prefix string, data []byte, err error) error { + if syntaxErr, ok := err.(*json.SyntaxError); err != nil && ok { + offset := syntaxErr.Offset + begin := offset - 20 + if begin < 0 { + begin = 0 + } + end := offset + 20 + if end > int64(len(data)) { + end = int64(len(data)) + } + return fmt.Errorf("%s%v (found near '%s')", prefix, err, string(data[begin:end])) + } + if err != nil { + return fmt.Errorf("%s%v", prefix, err) + } + return err +} + +// strictDecodeCheck fails decodes when jsonData contains fields not included in the external version of obj +func strictDecodeCheck(jsonData []byte, obj runtime.Object, scheme *runtime.Scheme) error { + out, err := getExternalZeroValue(obj, scheme) // we need the external version of obj as that has the correct JSON struct tags + if err != nil { + klog.Errorf("Encountered config error %v in object %T, raw JSON:\n%s", err, obj, string(jsonData)) // TODO just return the error and die + // never error for now, we need to determine a safe way to make this check fatal + return nil + } + d := json.NewDecoder(bytes.NewReader(jsonData)) + d.DisallowUnknownFields() + // note that we only care about the error, out is discarded + if err := d.Decode(out); err != nil { + klog.Errorf("Encountered config error %v in object %T, raw JSON:\n%s", err, obj, string(jsonData)) // TODO just return the error and die + } + // never error for now, we need to determine a safe way to make this check fatal + return nil +} + +// getExternalZeroValue returns the zero value of the external version of obj +func getExternalZeroValue(obj runtime.Object, scheme *runtime.Scheme) (runtime.Object, error) { + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { // should never happen + return nil, fmt.Errorf("no gvks found for %#v", obj) + } + return scheme.New(gvks[0]) +} + +// WriteYAML serializes a yaml file based on the scheme functions provided +func WriteYAML(obj runtime.Object, schemeFns ...InstallFunc) ([]byte, error) { + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) 
+ + json, err := runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + + content, err := yaml.JSONToYAML(json) + if err != nil { + return nil, err + } + return content, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/validation/general.go b/vendor/github.com/openshift/library-go/pkg/config/validation/general.go new file mode 100644 index 0000000000000..bf47e7b5510c9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/validation/general.go @@ -0,0 +1,130 @@ +package validation + +import ( + "fmt" + "net" + "net/url" + "os" + "slices" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +type ValidationResults struct { + Warnings field.ErrorList + Errors field.ErrorList +} + +func (r *ValidationResults) Append(additionalResults ValidationResults) { + r.AddErrors(additionalResults.Errors...) + r.AddWarnings(additionalResults.Warnings...) +} + +func (r *ValidationResults) AddErrors(errors ...*field.Error) { + if len(errors) == 0 { + return + } + r.Errors = append(r.Errors, errors...) +} + +func (r *ValidationResults) AddWarnings(warnings ...*field.Error) { + if len(warnings) == 0 { + return + } + r.Warnings = append(r.Warnings, warnings...) +} + +func ValidateHostPort(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(value) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if _, _, err := net.SplitHostPort(value); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, value, "must be a host:port")) + } + + return allErrs +} + +func ValidateFile(path string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(path) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if _, err := os.Stat(path); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, path, fmt.Sprintf("could not read file: %v", err))) + } + + return allErrs +} + +func ValidateSecureURL(urlString string, fldPath *field.Path) (*url.URL, field.ErrorList) { + url, urlErrs := ValidateURL(urlString, fldPath) + if len(urlErrs) == 0 && url.Scheme != "https" { + urlErrs = append(urlErrs, field.Invalid(fldPath, urlString, "must use https scheme")) + } + return url, urlErrs +} + +func ValidateURL(urlString string, fldPath *field.Path) (*url.URL, field.ErrorList) { + allErrs := field.ErrorList{} + + urlObj, err := url.Parse(urlString) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must be a valid URL")) + return nil, allErrs + } + if len(urlObj.Scheme) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must contain a scheme (e.g. 
https://)"))
+	}
+	if len(urlObj.Host) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must contain a host"))
+	}
+	return urlObj, allErrs
+}
+
+func ValidateDir(path string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(path) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath, ""))
+	} else {
+		fileInfo, err := os.Stat(path)
+		if err != nil {
+			allErrs = append(allErrs, field.Invalid(fldPath, path, fmt.Sprintf("could not read info: %v", err)))
+		} else if !fileInfo.IsDir() {
+			allErrs = append(allErrs, field.Invalid(fldPath, path, "not a directory"))
+		}
+	}
+
+	return allErrs
+}
+
+// HostnameMatchSpecCandidates returns a list of match specs that would match the provided hostname.
+// Returns nil if len(hostname) == 0
+func HostnameMatchSpecCandidates(hostname string) []string {
+	if len(hostname) == 0 {
+		return nil
+	}
+
+	// Exact match has priority
+	candidates := []string{hostname}
+
+	// Replace successive labels in the name with wildcards, to require an exact match on number of
+	// path segments, because certificates cannot wildcard multiple levels of subdomains
+	//
+	// This is primarily to be consistent with the tls.Config#getCertificate implementation
+	//
+	// It allows a cert signed for *.foo.example.com and *.bar.example.com to be selected by specifying the name *.*.example.com
+	labels := strings.Split(hostname, ".")
+	for i := range labels {
+		labels[i] = "*"
+		candidates = append(candidates, strings.Join(labels, "."))
+	}
+	return candidates
+}
+
+// HostnameMatches returns true if the given hostname is matched by the given matchSpec
+func HostnameMatches(hostname string, matchSpec string) bool {
+	return slices.Contains(HostnameMatchSpecCandidates(hostname), matchSpec)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go b/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go
new file mode 100644
index 0000000000000..e079a227651ee
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go
@@ -0,0 +1,174 @@
+package validation
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+
+	configv1 "github.com/openshift/api/config/v1"
+	"github.com/openshift/library-go/pkg/crypto"
+)
+
+func ValidateHTTPServingInfo(info configv1.HTTPServingInfo, fldPath *field.Path) ValidationResults {
+	validationResults := ValidationResults{}
+
+	validationResults.Append(ValidateServingInfo(info.ServingInfo, true, fldPath))
+
+	if info.MaxRequestsInFlight < 0 {
+		validationResults.AddErrors(field.Invalid(fldPath.Child("maxRequestsInFlight"), info.MaxRequestsInFlight, "must be zero (no limit) or greater"))
+	}
+
+	if info.RequestTimeoutSeconds < -1 {
+		validationResults.AddErrors(field.Invalid(fldPath.Child("requestTimeoutSeconds"), info.RequestTimeoutSeconds, "must be -1 (no timeout), 0 (default timeout), or greater"))
+	}
+
+	return validationResults
+}
+
+func ValidateServingInfo(info configv1.ServingInfo, certificatesRequired bool, fldPath *field.Path) ValidationResults {
+	validationResults := ValidationResults{}
+
+	validationResults.AddErrors(ValidateHostPort(info.BindAddress, fldPath.Child("bindAddress"))...)
+	validationResults.AddErrors(ValidateCertInfo(info.CertInfo, certificatesRequired, fldPath)...)
+ + if len(info.NamedCertificates) > 0 && len(info.CertFile) == 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("namedCertificates"), "", "a default certificate and key is required in certFile/keyFile in order to use namedCertificates")) + } + + validationResults.Append(ValidateNamedCertificates(fldPath.Child("namedCertificates"), info.NamedCertificates)) + + switch info.BindNetwork { + case "tcp", "tcp4", "tcp6": + default: + validationResults.AddErrors(field.Invalid(fldPath.Child("bindNetwork"), info.BindNetwork, "must be 'tcp', 'tcp4', or 'tcp6'")) + } + + if len(info.CertFile) > 0 { + if len(info.ClientCA) > 0 { + validationResults.AddErrors(ValidateFile(info.ClientCA, fldPath.Child("clientCA"))...) + } + } else { + if certificatesRequired && len(info.ClientCA) > 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("clientCA"), info.ClientCA, "cannot specify a clientCA without a certFile")) + } + } + + if _, err := crypto.TLSVersion(info.MinTLSVersion); err != nil { + validationResults.AddErrors(field.NotSupported(fldPath.Child("minTLSVersion"), info.MinTLSVersion, crypto.ValidTLSVersions())) + } + for i, cipher := range info.CipherSuites { + if _, err := crypto.CipherSuite(cipher); err != nil { + validationResults.AddErrors(field.NotSupported(fldPath.Child("cipherSuites").Index(i), cipher, crypto.ValidCipherSuites())) + } + } + + return validationResults +} + +func ValidateNamedCertificates(fldPath *field.Path, namedCertificates []configv1.NamedCertificate) ValidationResults { + validationResults := ValidationResults{} + + takenNames := sets.New[string]() + for i, namedCertificate := range namedCertificates { + idxPath := fldPath.Index(i) + + certDNSNames := []string{} + if len(namedCertificate.CertFile) == 0 { + validationResults.AddErrors(field.Required(idxPath.Child("certInfo"), "")) + } else if certInfoErrors := ValidateCertInfo(namedCertificate.CertInfo, false, idxPath); len(certInfoErrors) > 0 { + validationResults.AddErrors(certInfoErrors...) + } else if cert, err := tls.LoadX509KeyPair(namedCertificate.CertFile, namedCertificate.KeyFile); err != nil { + validationResults.AddErrors(field.Invalid(idxPath.Child("certInfo"), namedCertificate.CertInfo, fmt.Sprintf("error loading certificate/key: %v", err))) + } else { + leaf, _ := x509.ParseCertificate(cert.Certificate[0]) + certDNSNames = append(certDNSNames, leaf.Subject.CommonName) + certDNSNames = append(certDNSNames, leaf.DNSNames...) 
+ } + + if len(namedCertificate.Names) == 0 { + validationResults.AddErrors(field.Required(idxPath.Child("names"), "")) + } + for j, name := range namedCertificate.Names { + jdxPath := idxPath.Child("names").Index(j) + if len(name) == 0 { + validationResults.AddErrors(field.Required(jdxPath, "")) + continue + } + + if takenNames.Has(name) { + validationResults.AddErrors(field.Invalid(jdxPath, name, "this name is already used in another named certificate")) + continue + } + + // validate names as domain names or *.*.foo.com domain names + validDNSName := true + for _, s := range strings.Split(name, ".") { + if s != "*" && len(utilvalidation.IsDNS1123Label(s)) != 0 { + validDNSName = false + } + } + if !validDNSName { + validationResults.AddErrors(field.Invalid(jdxPath, name, "must be a valid DNS name")) + continue + } + + takenNames.Insert(name) + + // validate certificate has common name or subject alt names that match + if len(certDNSNames) > 0 { + foundMatch := false + for _, dnsName := range certDNSNames { + if HostnameMatches(dnsName, name) { + foundMatch = true + break + } + // if the cert has a wildcard dnsName, and we've configured a non-wildcard name, see if our specified name will match against the dnsName. + if strings.HasPrefix(dnsName, "*.") && !strings.HasPrefix(name, "*.") && HostnameMatches(name, dnsName) { + foundMatch = true + break + } + } + if !foundMatch { + validationResults.AddWarnings(field.Invalid(jdxPath, name, "the specified certificate does not have a CommonName or DNS subjectAltName that matches this name")) + } + } + } + } + + return validationResults +} + +func ValidateCertInfo(certInfo configv1.CertInfo, required bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if required { + if len(certInfo.CertFile) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("certFile"), "The certificate file must be provided")) + } + if len(certInfo.KeyFile) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("keyFile"), "The certificate key must be provided")) + } + } + + if (len(certInfo.CertFile) == 0) != (len(certInfo.KeyFile) == 0) { + allErrs = append(allErrs, field.Required(fldPath.Child("certFile"), "Both the certificate file and the certificate key must be provided together or not at all")) + allErrs = append(allErrs, field.Required(fldPath.Child("keyFile"), "Both the certificate file and the certificate key must be provided together or not at all")) + } + + if len(certInfo.CertFile) > 0 { + allErrs = append(allErrs, ValidateFile(certInfo.CertFile, fldPath.Child("certFile"))...) + } + + if len(certInfo.KeyFile) > 0 { + allErrs = append(allErrs, ValidateFile(certInfo.KeyFile, fldPath.Child("keyFile"))...) + } + + // validate certfile/keyfile load/parse? 
+ + return allErrs +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS new file mode 100644 index 0000000000000..4d4ce5ab9efd8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - stlaz +approvers: + - stlaz diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go new file mode 100644 index 0000000000000..e6651fecc2c6e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -0,0 +1,1221 @@ +package crypto + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + mathrand "math/rand" + "net" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/util/cert" +) + +// TLS versions that are known to golang. Go 1.13 adds support for +// TLS 1.3 that's opt-out with a build flag. +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLS versions that are enabled. +var supportedVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSVersionToNameOrDie given a tls version as an int, return its readable name +func TLSVersionToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range versions { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} +func TLSVersionOrDie(versionName string) uint16 { + version, err := TLSVersion(versionName) + if err != nil { + panic(err) + } + return version +} + +// TLS versions that are known to golang, but may not necessarily be enabled. +func GolangTLSVersions() []string { + supported := []string{} + for k := range versions { + supported = append(supported, k) + } + sort.Strings(supported) + return supported +} + +// Returns the build enabled TLS versions. 
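Before the build-enabled-versions helper the comment above introduces, a minimal sketch of the TLS version helpers defined in this file; the output comments follow from the code above, assuming the defaults are unchanged.

```go
package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/crypto"
)

func main() {
	// An empty name falls back to the package default (TLS 1.2).
	v, err := crypto.TLSVersion("")
	if err != nil {
		panic(err)
	}
	fmt.Println(crypto.TLSVersionToNameOrDie(v)) // VersionTLS12

	// The validation code earlier in this diff uses the same lookup to reject unknown names.
	if _, err := crypto.TLSVersion("VersionTLS99"); err != nil {
		fmt.Println(err) // unknown tls version "VersionTLS99"
	}

	// The build-enabled versions, sorted by name.
	fmt.Println(crypto.ValidTLSVersions())
}
```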
+func ValidTLSVersions() []string { + validVersions := []string{} + for k := range supportedVersions { + validVersions = append(validVersions, k) + } + sort.Strings(validVersions) + return validVersions +} +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} + +// ciphersTLS13 copies golang 1.13 implementation, where TLS1.3 suites are not +// configurable (cipherSuites field is ignored for TLS1.3 flows and all of the +// below three - and none other - are used) +var ciphersTLS13 = map[string]uint16{ + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, +} + +var ciphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, +} + +// openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names +// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +var openSSLToIANACiphersMap = map[string]string{ + // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows + "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + + // TLS 1.2 + "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B + "ECDHE-RSA-AES128-GCM-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // 
0xC0,0x2F + "ECDHE-ECDSA-AES256-GCM-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x2C + "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30 + "ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA9 + "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8 + "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 + "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 + "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C + "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D + "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + + // TLS 1 + "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 + "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 + "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A + "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 + + // SSL 3 + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A +} + +// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names +func CipherSuitesToNamesOrDie(intVals []uint16) []string { + ret := []string{} + for _, intVal := range intVals { + ret = append(ret, CipherSuiteToNameOrDie(intVal)) + } + + return ret +} + +// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name +func CipherSuiteToNameOrDie(intVal uint16) string { + // The following suite ids appear twice in the cipher map (with + // and without the _SHA256 suffix) for the purposes of backwards + // compatibility. Always return the current rather than the legacy + // name. 
+ switch intVal { + case tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + case tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + } + + matches := []string{} + for key, version := range ciphers { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func CipherSuite(cipherName string) (uint16, error) { + if cipher, ok := ciphers[cipherName]; ok { + return cipher, nil + } + + if _, ok := ciphersTLS13[cipherName]; ok { + return 0, fmt.Errorf("all golang TLSv1.3 ciphers are always used for TLSv1.3 flows") + } + + return 0, fmt.Errorf("unknown cipher name %q", cipherName) +} + +func CipherSuitesOrDie(cipherNames []string) []uint16 { + if len(cipherNames) == 0 { + return DefaultCiphers() + } + cipherValues := []uint16{} + for _, cipherName := range cipherNames { + cipher, err := CipherSuite(cipherName) + if err != nil { + panic(err) + } + cipherValues = append(cipherValues, cipher) + } + return cipherValues +} +func ValidCipherSuites() []string { + validCipherSuites := []string{} + for k := range ciphers { + validCipherSuites = append(validCipherSuites, k) + } + sort.Strings(validCipherSuites) + return validCipherSuites +} +func DefaultCiphers() []uint16 { + // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher + // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for + // perfect forward secrecy. Servers may provide additional cipher + // suites for backwards compatibility with HTTP/1.1 clients. + // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A + // (TLS 1.2 Cipher Suite Black List). + return []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2 + // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index + // because it comes after ciphers forbidden by the http/2 spec + // tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + } +} + +// SecureTLSConfig enforces the default minimum security settings for the cluster. 
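A hedged sketch of applying the enforcement helper documented above to an empty `tls.Config`; the printed values follow from `DefaultTLSVersion` and `DefaultCiphers` earlier in this file, assuming neither field was set explicitly.

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/openshift/library-go/pkg/crypto"
)

func main() {
	// Starting from a zero config, SecureTLSConfig fills in the cluster minimums
	// (TLS 1.2 and the default cipher list) without overriding explicit choices.
	cfg := crypto.SecureTLSConfig(&tls.Config{})
	fmt.Println(crypto.TLSVersionToNameOrDie(cfg.MinVersion)) // VersionTLS12
	fmt.Println(len(cfg.CipherSuites) > 0)                    // true
}
```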
+func SecureTLSConfig(config *tls.Config) *tls.Config { + if config.MinVersion == 0 { + config.MinVersion = DefaultTLSVersion() + } + + config.PreferServerCipherSuites = true + if len(config.CipherSuites) == 0 { + config.CipherSuites = DefaultCiphers() + } + return config +} + +// OpenSSLToIANACipherSuites maps input OpenSSL Cipher Suite names to their +// IANA counterparts. +// Unknown ciphers are left out. +func OpenSSLToIANACipherSuites(ciphers []string) []string { + ianaCiphers := make([]string, 0, len(ciphers)) + + for _, c := range ciphers { + ianaCipher, found := openSSLToIANACiphersMap[c] + if found { + ianaCiphers = append(ianaCiphers, ianaCipher) + } + } + + return ianaCiphers +} + +type TLSCertificateConfig struct { + Certs []*x509.Certificate + Key crypto.PrivateKey +} + +type TLSCARoots struct { + Roots []*x509.Certificate +} + +func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error { + // ensure parent dir + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return err + } + certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return err + } + keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + + if err := writeCertificates(certFileWriter, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFileWriter, c.Key); err != nil { + return err + } + + if err := certFileWriter.Close(); err != nil { + return err + } + if err := keyFileWriter.Close(); err != nil { + return err + } + + return nil +} + +func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error { + if err := writeCertificates(certFile, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFile, c.Key); err != nil { + return err + } + return nil +} + +func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) { + certBytes, err := EncodeCertificates(c.Certs...) 
+ if err != nil { + return nil, nil, err + } + keyBytes, err := EncodeKey(c.Key) + if err != nil { + return nil, nil, err + } + + return certBytes, keyBytes, nil +} + +func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) { + if len(certFile) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyFile) == 0 { + return nil, errors.New("keyFile missing") + } + + certPEMBlock, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + certs, err := cert.ParseCertsPEM(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %s", certFile, err) + } + + keyPEMBlock, err := os.ReadFile(keyFile) + if err != nil { + return nil, err + } + keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) { + if len(certBytes) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyBytes) == 0 { + return nil, errors.New("keyFile missing") + } + + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + return nil, fmt.Errorf("Error reading cert: %s", err) + } + + keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +const ( + DefaultCertificateLifetimeInDays = 365 * 2 // 2 years + DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years + + // Default keys are 2048 bits + keyBits = 2048 +) + +type CA struct { + Config *TLSCertificateConfig + + SerialGenerator SerialGenerator +} + +// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe. +type SerialGenerator interface { + Next(template *x509.Certificate) (int64, error) +} + +// SerialFileGenerator returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. +type SerialFileGenerator struct { + SerialFile string + + // lock guards access to the Serial field + lock sync.Mutex + Serial int64 +} + +func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) { + // read serial file, it must already exist + serial, err := fileToSerial(serialFile) + if err != nil { + return nil, err + } + + generator := &SerialFileGenerator{ + Serial: serial, + SerialFile: serialFile, + } + + // 0 is unused and 1 is reserved for the CA itself + // Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+ + // meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative) + if generator.Serial < 1 { + // fake a call to Next so the file stays in sync and Serial is incremented + if _, err := generator.Next(&x509.Certificate{}); err != nil { + return nil, err + } + } + + return generator, nil +} + +// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. 
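+//
+// For illustration (editor's note): with "0F" (decimal 15) in the serial
+// file, a call to Next persists "10\n" (hex, zero-padded to an even number
+// of digits) and returns 16.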
+func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + // do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file + serial, err := fileToSerial(s.SerialFile) + if err != nil { + return 0, err + } + if serial != s.Serial { + return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial) + } + + next := s.Serial + 1 + s.Serial = next + + // Output in hex, padded to multiples of two characters for OpenSSL's sake + serialText := fmt.Sprintf("%X", next) + if len(serialText)%2 == 1 { + serialText = "0" + serialText + } + // always add a newline at the end to have a valid file + serialText += "\n" + + if err := os.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil { + return 0, err + } + return next, nil +} + +func fileToSerial(serialFile string) (int64, error) { + serialData, err := os.ReadFile(serialFile) + if err != nil { + return 0, err + } + + // read the file as a single hex number after stripping any whitespace + serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64) + if err != nil { + return 0, err + } + + if serial < 0 { + return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile) + } + + return serial, nil +} + +// RandomSerialGenerator returns a serial based on time.Now and the subject +type RandomSerialGenerator struct { +} + +func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) { + return randomSerialNumber(), nil +} + +// randomSerialNumber returns a random int64 serial number based on +// time.Now. It is defined separately from the generator interface so +// that the caller doesn't have to worry about an input template or +// error - these are unnecessary when creating a random serial. 
+func randomSerialNumber() int64 { + r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano())) + return r.Int63() +} + +// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error +// if serialFile is empty, a RandomSerialGenerator will be used +func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if ca, err := GetCA(certFile, keyFile, serialFile); err == nil { + return ca, false, err + } + ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays) + return ca, true, err +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func GetCA(certFile, keyFile, serialFile string) (*CA, error) { + caConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) { + caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes) + if err != nil { + return nil, err + } + + return &CA{ + SerialGenerator: &RandomSerialGenerator{}, + Config: caConfig, + }, nil +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile) + + caConfig, err := MakeSelfSignedCAConfig(name, expireDays) + if err != nil { + return nil, err + } + if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return MakeSelfSignedCAConfigForSubject(subject, expireDays) +} + +func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) { + var caLifetimeInDays = DefaultCACertificateLifetimeInDays + if expireDays > 0 { + caLifetimeInDays = expireDays + } + + if caLifetimeInDays > DefaultCACertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays) + } + + caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour + return makeSelfSignedCAConfigForSubjectAndDuration(subject, time.Now, caLifetime) +} + +func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return makeSelfSignedCAConfigForSubjectAndDuration(subject, time.Now, caLifetime) +} + +func UnsafeMakeSelfSignedCAConfigForDurationAtTime(name string, currentTime func() time.Time, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + 
return makeSelfSignedCAConfigForSubjectAndDuration(subject, currentTime, caLifetime) +} + +func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, currentTime func() time.Time, caLifetime time.Duration) (*TLSCertificateConfig, error) { + // Create CA cert + rootcaPublicKey, rootcaPrivateKey, publicKeyHash, err := newKeyPairWithHash() + if err != nil { + return nil, err + } + // AuthorityKeyId and SubjectKeyId should match for a self-signed CA + authorityKeyId := publicKeyHash + subjectKeyId := publicKeyHash + rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, currentTime, authorityKeyId, subjectKeyId) + rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey) + if err != nil { + return nil, err + } + caConfig := &TLSCertificateConfig{ + Certs: []*x509.Certificate{rootcaCert}, + Key: rootcaPrivateKey, + } + return caConfig, nil +} + +func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) { + // Create CA cert + signerPublicKey, signerPrivateKey, publicKeyHash, err := newKeyPairWithHash() + if err != nil { + return nil, err + } + authorityKeyId := issuer.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now, authorityKeyId, subjectKeyId) + signerCert, err := issuer.SignCertificate(signerTemplate, signerPublicKey) + if err != nil { + return nil, err + } + signerConfig := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...), + Key: signerPrivateKey, + } + return signerConfig, nil +} + +// EnsureSubCA returns a subCA signed by the `ca`, whether it was created +// (as opposed to pre-existing), and any error that might occur during the subCA +// creation. +// If serialFile is an empty string, a RandomSerialGenerator will be used. +func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if subCA, err := GetCA(certFile, keyFile, serialFile); err == nil { + return subCA, false, err + } + subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, expireDays) + return subCA, true, err +} + +// MakeAndWriteSubCA returns a new sub-CA configuration. New cert/key pair is generated +// while using this function. +// If serialFile is an empty string, a RandomSerialGenerator will be used. 
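+//
+// A usage sketch (editor's illustration; the file names, CA name, and
+// lifetime are hypothetical):
+//
+//	subCA, err := ca.MakeAndWriteSubCA("sub-ca.crt", "sub-ca.key", "", "my-sub-ca", 365)
+//	// subCA.Config.Certs holds the new sub-CA certificate followed by the
+//	// issuer's chain, so it can be written out as a bundle.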
+func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(4).Infof("Generating sub-CA certificate in %s, key in %s, serial in %s", certFile, keyFile, serialFile) + + subCAConfig, err := MakeCAConfigForDuration(name, time.Duration(expireDays)*time.Hour*24, ca) + if err != nil { + return nil, err + } + + if err := subCAConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + Config: subCAConfig, + SerialGenerator: serialGenerator, + }, nil +} + +func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.Set[string], expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetServerCert(certFile, keyFile, hostnames) + if err != nil { + certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays) + return certConfig, true, err + } + + return certConfig, false, nil +} + +func GetServerCert(certFile, keyFile string, hostnames sets.Set[string]) (*TLSCertificateConfig, error) { + server, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + cert := server.Certs[0] + certNames := sets.New[string]() + for _, ip := range cert.IPAddresses { + certNames.Insert(ip.String()) + } + certNames.Insert(cert.DNSNames...) + if hostnames.Equal(certNames) { + klog.V(4).Infof("Found existing server certificate in %s", certFile) + return server, nil + } + + return nil, fmt.Errorf("Existing server certificate in %s does not match required hostnames.", certFile) +} + +func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.Set[string], expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile) + + server, err := ca.MakeServerCert(hostnames, expireDays) + if err != nil { + return nil, err + } + if err := server.WriteCertConfigFile(certFile, keyFile); err != nil { + return server, err + } + return server, nil +} + +// CertificateExtensionFunc is passed a certificate that it may extend, or return an error +// if the extension attempt failed. 
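+//
+// For example (editor's sketch; hostnames and the extra DNS name are
+// hypothetical), an extension function can mutate the template before it is
+// signed:
+//
+//	addAltName := func(cert *x509.Certificate) error {
+//		cert.DNSNames = append(cert.DNSNames, "alt.example.com")
+//		return nil
+//	}
+//	server, err := ca.MakeServerCert(hostnames, 365, addAltName)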
+type CertificateExtensionFunc func(*x509.Certificate) error + +func (ca *CA) MakeServerCert(hostnames sets.Set[string], expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: sets.List(hostnames)[0]}, sets.List(hostnames), expireDays, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.SignCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) MakeServerCertForDuration(hostnames sets.Set[string], lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: sets.List(hostnames)[0]}, sets.List(hostnames), lifetime, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.SignCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetClientCertificate(certFile, keyFile, u) + if err != nil { + certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays) + return certConfig, true, err // true indicates we wrote the files. 
+ } + return certConfig, false, nil +} + +func GetClientCertificate(certFile, keyFile string, u user.Info) (*TLSCertificateConfig, error) { + certConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + if subject := certConfig.Certs[0].Subject; subjectChanged(subject, UserToSubject(u)) { + return nil, fmt.Errorf("existing client certificate in %s was issued for a different Subject (%s)", + certFile, subject) + } + + return certConfig, nil +} + +func subjectChanged(existing, expected pkix.Name) bool { + sort.Strings(existing.Organization) + sort.Strings(expected.Organization) + + return existing.CommonName != expected.CommonName || + existing.SerialNumber != expected.SerialNumber || + !reflect.DeepEqual(existing.Organization, expected.Organization) +} + +func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile) + // ensure parent dirs + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return nil, err + } + + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := NewClientCertificateTemplate(UserToSubject(u), expireDays, time.Now) + clientCrt, err := ca.SignCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := EncodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + if err = os.WriteFile(certFile, certData, os.FileMode(0644)); err != nil { + return nil, err + } + if err = os.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil { + return nil, err + } + + return GetTLSCertificateConfig(certFile, keyFile) +} + +func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) { + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := NewClientCertificateTemplateForDuration(UserToSubject(u), lifetime, time.Now) + clientCrt, err := ca.SignCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := EncodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + return GetTLSCertificateConfigFromBytes(certData, keyData) +} + +type sortedForDER []string + +func (s sortedForDER) Len() int { + return len(s) +} +func (s sortedForDER) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortedForDER) Less(i, j int) bool { + l1 := len(s[i]) + l2 := len(s[j]) + if l1 == l2 { + return s[i] < s[j] + } + return l1 < l2 +} + +func UserToSubject(u user.Info) pkix.Name { + // Ok we are going to order groups in a peculiar way here to workaround a + // 2 bugs, 1 in golang (https://github.com/golang/go/issues/24254) which + // incorrectly encodes Multivalued RDNs and another in GNUTLS clients + // which are too picky (https://gitlab.com/gnutls/gnutls/issues/403) + // and try to "correct" this issue when reading client certs. + // + // This workaround should be killed once Golang's pkix module is fixed to + // generate a correct DER encoding. 
+ // + // The workaround relies on the fact that the first octect that differs + // between the encoding of two group RDNs will end up being the encoded + // length which is directly related to the group name's length. So we'll + // sort such that shortest names come first. + ugroups := u.GetGroups() + groups := make([]string, len(ugroups)) + copy(groups, ugroups) + sort.Sort(sortedForDER(groups)) + + return pkix.Name{ + CommonName: u.GetName(), + SerialNumber: u.GetUID(), + Organization: groups, + } +} + +func (ca *CA) SignCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) { + // Increment and persist serial + serial, err := ca.SerialGenerator.Next(template) + if err != nil { + return nil, err + } + template.SerialNumber = big.NewInt(serial) + return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key) +} + +func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { + return newRSAKeyPair() +} + +func newKeyPairWithHash() (crypto.PublicKey, crypto.PrivateKey, []byte, error) { + publicKey, privateKey, err := newRSAKeyPair() + var publicKeyHash []byte + if err == nil { + hash := sha1.New() + hash.Write(publicKey.N.Bytes()) + publicKeyHash = hash.Sum(nil) + } + return publicKey, privateKey, publicKeyHash, err +} + +func newRSAKeyPair() (*rsa.PublicKey, *rsa.PrivateKey, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + return nil, nil, err + } + return &privateKey.PublicKey, privateKey, nil +} + +// Can be used for CA or intermediate signing certs +func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(caLifetime), + + // Specify a random serial number to avoid the same issuer+serial + // number referring to different certs in a chain of trust if the + // signing certificate is ever rotated. 
+ SerialNumber: big.NewInt(randomSerialNumber()), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId) +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + template := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } + + template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts) + + return template +} + +func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) { + ips := []net.IP{} + dns := []string{} + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + ips = append(ips, ip) + } else { + dns = append(dns, host) + } + } + + // Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and unnamed other libraries + // Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox + // See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766 + for _, ip := range ips { + dns = append(dns, ip.String()) + } + + return ips, dns +} + +func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("Could not read any certificates") + } + return certs, nil +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func NewClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return NewClientCertificateTemplateForDuration(subject, lifetime, currentTime) +} + +// Can be 
used as a certificate in http.Transport TLSClientConfig
+func NewClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate {
+	return &x509.Certificate{
+		Subject: subject,
+
+		SignatureAlgorithm: x509.SHA256WithRSA,
+
+		NotBefore:    currentTime().Add(-1 * time.Second),
+		NotAfter:     currentTime().Add(lifetime),
+		SerialNumber: big.NewInt(1),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+		BasicConstraintsValid: true,
+	}
+}
+
+func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) {
+	defaultLifetimeInYears := defaultLifetimeInDays / 365
+	fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears)
+	fmt.Fprintln(os.Stderr, "WARNING: For security reasons it is strongly recommended to change this period and make it smaller!")
+}
+
+func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) {
+	derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := x509.ParseCertificates(derBytes)
+	if err != nil {
+		return nil, err
+	}
+	if len(certs) != 1 {
+		return nil, errors.New("Expected a single certificate")
+	}
+	return certs[0], nil
+}
+
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
+	b := bytes.Buffer{}
+	for _, cert := range certs {
+		if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
+			return []byte{}, err
+		}
+	}
+	return b.Bytes(), nil
+}
+func EncodeKey(key crypto.PrivateKey) ([]byte, error) {
+	b := bytes.Buffer{}
+	switch key := key.(type) {
+	case *ecdsa.PrivateKey:
+		keyBytes, err := x509.MarshalECPrivateKey(key)
+		if err != nil {
+			return []byte{}, err
+		}
+		if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
+			return b.Bytes(), err
+		}
+	case *rsa.PrivateKey:
+		if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil {
+			return []byte{}, err
+		}
+	default:
+		return []byte{}, errors.New("Unrecognized key type")
+
+	}
+	return b.Bytes(), nil
+}
+
+func writeCertificates(f io.Writer, certs ...*x509.Certificate) error {
+	bytes, err := EncodeCertificates(certs...)
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(bytes); err != nil {
+		return err
+	}
+
+	return nil
+}
+func writeKeyFile(f io.Writer, key crypto.PrivateKey) error {
+	bytes, err := EncodeKey(key)
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(bytes); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
new file mode 100644
index 0000000000000..0aa127037c805
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
@@ -0,0 +1,20 @@
+package crypto
+
+import (
+	"crypto/x509"
+	"time"
+)
+
+// FilterExpiredCerts checks whether the certificates in the bundle are still valid, i.e. have not expired.
+// It returns a new bundle containing only the certificates that are still valid.
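+//
+// For illustration (editor's note; expiredCert and validCert are
+// hypothetical *x509.Certificate values):
+//
+//	valid := FilterExpiredCerts(expiredCert, validCert)
+//	// len(valid) == 1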
+func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate { + currentTime := time.Now() + var validCerts []*x509.Certificate + for _, c := range certs { + if c.NotAfter.After(currentTime) { + validCerts = append(validCerts, c) + } + } + + return validCerts +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go new file mode 100644 index 0000000000000..e74acf8637457 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go @@ -0,0 +1,518 @@ +package imageutil + +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/blang/semver/v4" + + "github.com/openshift/api/image/docker10" + imagev1 "github.com/openshift/api/image/v1" + digestinternal "github.com/openshift/library-go/pkg/image/internal/digest" + imagereference "github.com/openshift/library-go/pkg/image/reference" +) + +const ( + // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. + DefaultImageTag = "latest" +) + +var ParseDigest = digestinternal.ParseDigest + +// SplitImageStreamTag turns the name of an ImageStreamTag into Name and Tag. +// It returns false if the tag was not properly specified in the name. +func SplitImageStreamTag(nameAndTag string) (name string, tag string, ok bool) { + parts := strings.SplitN(nameAndTag, ":", 2) + name = parts[0] + if len(parts) > 1 { + tag = parts[1] + } + if len(tag) == 0 { + tag = DefaultImageTag + } + return name, tag, len(parts) == 2 +} + +// SplitImageStreamImage turns the name of an ImageStreamImage into Name and ID. +// It returns false if the ID was not properly specified in the name. +func SplitImageStreamImage(nameAndID string) (name string, id string, ok bool) { + parts := strings.SplitN(nameAndID, "@", 2) + name = parts[0] + if len(parts) > 1 { + id = parts[1] + } + return name, id, len(parts) == 2 +} + +// JoinImageStreamTag turns a name and tag into the name of an ImageStreamTag +func JoinImageStreamTag(name, tag string) string { + if len(tag) == 0 { + tag = DefaultImageTag + } + return fmt.Sprintf("%s:%s", name, tag) +} + +// JoinImageStreamImage creates a name for image stream image object from an image stream name and an id. +func JoinImageStreamImage(name, id string) string { + return fmt.Sprintf("%s@%s", name, id) +} + +// ParseImageStreamTagName splits a string into its name component and tag component, and returns an error +// if the string is not in the right form. +func ParseImageStreamTagName(istag string) (name string, tag string, err error) { + if strings.Contains(istag, "@") { + err = fmt.Errorf("%q is an image stream image, not an image stream tag", istag) + return + } + segments := strings.SplitN(istag, ":", 3) + switch len(segments) { + case 2: + name = segments[0] + tag = segments[1] + if len(name) == 0 || len(tag) == 0 { + err = fmt.Errorf("image stream tag name %q must have a name and a tag", istag) + } + default: + err = fmt.Errorf("expected exactly one : delimiter in the istag %q", istag) + } + return +} + +// ParseImageStreamImageName splits a string into its name component and ID component, and returns an error +// if the string is not in the right form. 
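+//
+// For example (editor's illustration; the input value is hypothetical):
+//
+//	name, id, err := ParseImageStreamImageName("myis@sha256:0123")
+//	// name == "myis", id == "sha256:0123", err == nil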
+func ParseImageStreamImageName(input string) (name string, id string, err error) { + segments := strings.SplitN(input, "@", 3) + switch len(segments) { + case 2: + name = segments[0] + id = segments[1] + if len(name) == 0 || len(id) == 0 { + err = fmt.Errorf("image stream image name %q must have a name and ID", input) + } + default: + err = fmt.Errorf("expected exactly one @ in the isimage name %q", input) + } + return +} + +var ( + reMinorSemantic = regexp.MustCompile(`^[\d]+\.[\d]+$`) + reMinorWithPatch = regexp.MustCompile(`^([\d]+\.[\d]+)-\w+$`) +) + +type tagPriority int + +const ( + // the "latest" tag + tagPriorityLatest tagPriority = iota + + // a semantic minor version ("5.1", "v5.1", "v5.1-rc1") + tagPriorityMinor + + // a full semantic version ("5.1.3-other", "v5.1.3-other") + tagPriorityFull + + // other tags + tagPriorityOther +) + +type prioritizedTag struct { + tag string + priority tagPriority + semver semver.Version + prefix string +} + +func prioritizeTag(tag string) prioritizedTag { + if tag == "latest" { + return prioritizedTag{ + tag: tag, + priority: tagPriorityLatest, + } + } + + short := tag + prefix := "" + if strings.HasPrefix(tag, "v") { + prefix = "v" + short = tag[1:] + } + + // 5.1.3 + if v, err := semver.Parse(short); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityFull, + semver: v, + prefix: prefix, + } + } + + // 5.1 + if reMinorSemantic.MatchString(short) { + if v, err := semver.Parse(short + ".0"); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityMinor, + semver: v, + prefix: prefix, + } + } + } + + // 5.1-rc1 + if match := reMinorWithPatch.FindStringSubmatch(short); match != nil { + if v, err := semver.Parse(strings.Replace(short, match[1], match[1]+".0", 1)); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityMinor, + semver: v, + prefix: prefix, + } + } + } + + // other + return prioritizedTag{ + tag: tag, + priority: tagPriorityOther, + prefix: prefix, + } +} + +type prioritizedTags []prioritizedTag + +func (t prioritizedTags) Len() int { return len(t) } +func (t prioritizedTags) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t prioritizedTags) Less(i, j int) bool { + if t[i].priority != t[j].priority { + return t[i].priority < t[j].priority + } + + if t[i].priority == tagPriorityOther { + return t[i].tag < t[j].tag + } + + cmp := t[i].semver.Compare(t[j].semver) + if cmp > 0 { // the newer tag has a higher priority + return true + } + return cmp == 0 && t[i].prefix < t[j].prefix +} + +// PrioritizeTags orders a set of image tags with a few conventions: +// +// 1. the "latest" tag, if present, should be first +// 2. any tags that represent a semantic minor version ("5.1", "v5.1", "v5.1-rc1") should be next, in descending order +// 3. any tags that represent a full semantic version ("5.1.3-other", "v5.1.3-other") should be next, in descending order +// 4. any remaining tags should be sorted in lexicographic order +// +// The method updates the tags in place. +func PrioritizeTags(tags []string) { + ptags := make(prioritizedTags, len(tags)) + for i, tag := range tags { + ptags[i] = prioritizeTag(tag) + } + sort.Sort(ptags) + for i, pt := range ptags { + tags[i] = pt.tag + } +} + +// SpecHasTag returns named tag from image stream's spec and boolean whether one was found. 
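+//
+// Editor's sketch (the stream value and tag name are hypothetical):
+//
+//	if ref, ok := SpecHasTag(stream, "latest"); ok {
+//		_ = ref.From // describes what spec.tags["latest"] tracks, if anything
+//	}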
+func SpecHasTag(stream *imagev1.ImageStream, name string) (imagev1.TagReference, bool) { + for _, tag := range stream.Spec.Tags { + if tag.Name == name { + return tag, true + } + } + return imagev1.TagReference{}, false +} + +// StatusHasTag returns named tag from image stream's status and boolean whether one was found. +func StatusHasTag(stream *imagev1.ImageStream, name string) (imagev1.NamedTagEventList, bool) { + for _, tag := range stream.Status.Tags { + if tag.Tag == name { + return tag, true + } + } + return imagev1.NamedTagEventList{}, false +} + +// LatestTaggedImage returns the most recent TagEvent for the specified image +// repository and tag. Will resolve lookups for the empty tag. Returns nil +// if tag isn't present in stream.status.tags. +func LatestTaggedImage(stream *imagev1.ImageStream, tag string) *imagev1.TagEvent { + if len(tag) == 0 { + tag = imagev1.DefaultImageTag + } + + // find the most recent tag event with an image reference + t, ok := StatusHasTag(stream, tag) + if ok { + if len(t.Items) == 0 { + return nil + } + return &t.Items[0] + } + + return nil +} + +// ImageWithMetadata mutates the given image. It parses raw DockerImageManifest data stored in the image and +// fills its DockerImageMetadata and other fields. +// Copied from github.com/openshift/image-registry/pkg/origin-common/util/util.go +func ImageWithMetadata(image *imagev1.Image) error { + // Check if the metadata are already filled in for this image. + meta, hasMetadata := image.DockerImageMetadata.Object.(*docker10.DockerImage) + if hasMetadata && meta.Size > 0 { + return nil + } + + version := image.DockerImageMetadataVersion + if len(version) == 0 { + version = "1.0" + } + + obj := &docker10.DockerImage{} + if len(image.DockerImageMetadata.Raw) != 0 { + if err := json.Unmarshal(image.DockerImageMetadata.Raw, obj); err != nil { + return err + } + image.DockerImageMetadata.Object = obj + } + + image.DockerImageMetadataVersion = version + + return nil +} + +func ImageWithMetadataOrDie(image *imagev1.Image) { + if err := ImageWithMetadata(image); err != nil { + panic(err) + } +} + +// TagReferencesLocalTag returns true if the provided tag reference references another image stream tag +// in the current image stream. This is only true when from points to an ImageStreamTag without a colon +// or from.name is :. +func TagReferencesLocalTag(stream *imagev1.ImageStream, tag imagev1.TagReference) (string, bool) { + if tag.From == nil || tag.From.Kind != "ImageStreamTag" { + return "", false + } + if len(tag.From.Namespace) > 0 && tag.From.Namespace != stream.Namespace { + return "", false + } + ref := strings.TrimPrefix(tag.From.Name, stream.Name+":") + if strings.Contains(ref, ":") { + return "", false + } + return ref, true +} + +var ( + // ErrNoStreamRepository is returned if the status dockerImageRepository field was unset but the + // method required that value to create a pull spec. + ErrNoStreamRepository = fmt.Errorf("no image repository has been set on the image stream status") + // ErrWaitForPullSpec is returned when a pull spec cannot be inferred from the image stream automatically + // and the user requires a valid image tag. + ErrWaitForPullSpec = fmt.Errorf("the pull spec cannot be determined yet") +) + +// ResolveNewestPullSpecForTag returns the most recent available pull spec for the given tag, even +// if importing that pull spec is still in progress or has failed. 
Use this method when the current
+// state of the tag as the user sees it is important because you don't want to silently ignore a
+// newer tag request that hasn't yet been imported. Note that if no image has been tagged or pushed,
+// pullSpec will still be returned pointing to the pull spec for the tag within the image repository
+// (: unless defaultExternal is set) and isTagEmpty will be true.
+// hasStatus is true if the returned pull spec points to an imported / pushed image, or false if
+// a spec tag has not been specified, the spec tag hasn't been imported, or the import has failed.
+// An error is returned only if isTagEmpty is true and status.dockerImageRepository is unset because
+// the administrator has not installed a registry server.
+//
+// Use this method when you need the user intent pull spec and you do not want to tolerate a slightly
+// older image (tooling that needs to error if the user's intent in tagging isn't realized).
+func ResolveNewestPullSpecForTag(stream *imagev1.ImageStream, tag string, defaultExternal bool) (pullSpec string, hasStatus, isTagEmpty bool, err error) {
+	pullSpec, _, hasStatus, isTagEmpty, err = resolvePullSpecForTag(stream, tag, defaultExternal, true)
+	return pullSpec, hasStatus, isTagEmpty, err
+}
+
+// ResolveRecentPullSpecForTag returns the most recent successfully imported pull spec for the
+// given tag, i.e. "last-known-good". Use this method when you can tolerate some lag in picking up
+// the newest version. This method is roughly equivalent to the behavior of pulling the image from
+// the internal registry. If no image has been tagged or pushed, pullSpec will still be returned
+// pointing to the pull spec for the tag within the image repository
+// (: unless defaultExternal is set) and isTagEmpty will be true.
+// hasNewer is true if the pull spec does not represent the newest user input, or false if the
+// current user spec tag has been imported successfully. hasStatus is true if the returned pull
+// spec points to an imported / pushed image, or false if a spec tag has not been specified, the
+// spec tag hasn't been imported, or the import has failed. An error is returned only if isTagEmpty
+// is true and status.dockerImageRepository is unset because the administrator has not installed a
+// registry server.
+//
+// This method is typically used by consumers that need the value at the tag and prefer to have a
+// slightly older image over not getting any image at all (or if the image can't be imported
+// due to temporary network or controller issues).
+func ResolveRecentPullSpecForTag(stream *imagev1.ImageStream, tag string, defaultExternal bool) (pullSpec string, hasNewer, hasStatus, isTagEmpty bool, err error) {
+	pullSpec, hasNewer, hasStatus, isTagEmpty, err = resolvePullSpecForTag(stream, tag, defaultExternal, false)
+	return pullSpec, hasNewer, hasStatus, isTagEmpty, err
+}
+
+// resolvePullSpecForTag handles finding the most accurate pull spec depending on whether the user
+// requires the latest or simply wants the most recent imported version (ignores pending imports).
+// If a pull spec cannot be inferred, an error is returned.
Otherwise the following status values are +// returned: +// +// * hasNewer - a newer version of this tag is being imported but is not ready +// * hasStatus - this pull spec points to the latest image in the status (has been imported / pushed) +// * isTagEmpty - no pull spec or push has occurred to this tag, but it's still possible to get a pull spec +// +// defaultExternal is considered when isTagEmpty is true (no user input provided) and calculates the pull +// spec from the external repository base (status.publicDockerImageRepository) if it is set. +func resolvePullSpecForTag(stream *imagev1.ImageStream, tag string, defaultExternal, requireLatest bool) (pullSpec string, hasNewer, hasStatus, isTagEmpty bool, err error) { + if len(tag) == 0 { + tag = imagev1.DefaultImageTag + } + status, _ := StatusHasTag(stream, tag) + spec, hasSpec := SpecHasTag(stream, tag) + hasSpecTagRef := hasSpec && spec.From != nil && spec.From.Kind == "DockerImage" && spec.ReferencePolicy.Type == imagev1.SourceTagReferencePolicy + + var event *imagev1.TagEvent + switch { + case len(status.Items) == 0: + // nothing in status: + // - waiting for import of first image (generation of spec > status) + // - spec is empty + // - spec is a ref tag to something else that hasn't been imported yet + // - spec is a ref tag to another spec tag on this same image stream that doesn't exist + + case hasSpec && spec.Generation != nil && *spec.Generation > status.Items[0].Generation: + // waiting for import because spec generation is newer and had a previous image + if requireLatest { + // note: if spec tag doesn't have a DockerImage kind, we'll have to wait for whatever + // logic is necessary for import to run (this could happen if a new Kind is introduced) + if !hasSpecTagRef { + return "", hasNewer, false, false, ErrWaitForPullSpec + } + } else { + event = &status.Items[0] + hasNewer = true + } + default: + // this is the latest version of the image + event = &status.Items[0] + } + + switch { + case event != nil: + hasStatus = true + pullSpec = resolveReferenceForTagEvent(stream, spec, event) + case hasSpecTagRef: + // if the user explicitly provided a spec tag we can use + pullSpec = resolveReferenceForTagEvent(stream, spec, &imagev1.TagEvent{ + DockerImageReference: spec.From.Name, + }) + default: + isTagEmpty = true + repositorySpec := stream.Status.DockerImageRepository + if defaultExternal && len(stream.Status.PublicDockerImageRepository) > 0 { + repositorySpec = stream.Status.PublicDockerImageRepository + } + if len(repositorySpec) == 0 { + return "", false, false, false, ErrNoStreamRepository + } + pullSpec = JoinImageStreamTag(repositorySpec, tag) + } + return pullSpec, hasNewer, hasStatus, isTagEmpty, nil +} + +// ResolveLatestTaggedImage returns the appropriate pull spec for a given tag in +// the image stream, handling the tag's reference policy if necessary to return +// a resolved image. Callers that transform an ImageStreamTag into a pull spec +// should use this method instead of LatestTaggedImage. This method ignores pending +// imports (meaning the requested image may be stale) and will return no pull spec +// even if one is available on the spec tag (when importing kind DockerImage) if +// import has not completed. +// +// Use ResolvePullSpecForTag() if you wish more control over what type of pull spec +// is returned and what scenarios should be handled. 
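+//
+// A usage sketch (editor's illustration; the stream value is hypothetical):
+//
+//	pullSpec, ok := ResolveLatestTaggedImage(stream, "latest")
+//	if !ok {
+//		// nothing has been imported or pushed for this tag yet
+//	}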
+func ResolveLatestTaggedImage(stream *imagev1.ImageStream, tag string) (string, bool) {
+	if len(tag) == 0 {
+		tag = imagev1.DefaultImageTag
+	}
+	return resolveTagReference(stream, tag, LatestTaggedImage(stream, tag))
+}
+
+// resolveTagReference applies the tag reference rules for a stream, tag, and tag event for
+// that tag. It returns true if the tag can be resolved to an image reference.
+func resolveTagReference(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) (string, bool) {
+	// no image has been imported, so we can't resolve to a tagged image (we need an image id)
+	if latest == nil {
+		return "", false
+	}
+	// retrieve spec policy - if not found, we use the latest spec
+	ref, ok := SpecHasTag(stream, tag)
+	if !ok {
+		return latest.DockerImageReference, true
+	}
+	return resolveReferenceForTagEvent(stream, ref, latest), true
+}
+
+// resolveReferenceForTagEvent applies the tag reference rules for a stream, tag, and tag event for
+// that tag.
+func resolveReferenceForTagEvent(stream *imagev1.ImageStream, ref imagev1.TagReference, latest *imagev1.TagEvent) string {
+	switch ref.ReferencePolicy.Type {
+	// the local reference policy attempts to use image pull through on the integrated
+	// registry if possible
+	case imagev1.LocalTagReferencePolicy:
+		local := stream.Status.DockerImageRepository
+		if len(local) == 0 || len(latest.Image) == 0 {
+			// fallback to the originating reference if no local docker registry defined or we
+			// lack an image ID
+			return latest.DockerImageReference
+		}
+
+		// we must use imageapi's helper since we're calling Exact later on, which produces a string
+		ref, err := imagereference.Parse(local)
+		if err != nil {
+			// fallback to the originating reference if the reported local repository spec is not valid
+			return latest.DockerImageReference
+		}
+
+		// create a local pullthrough URL
+		ref.Tag = ""
+		ref.ID = latest.Image
+		return ref.Exact()
+
+	// the default policy is to use the originating image
+	default:
+		return latest.DockerImageReference
+	}
+}
+
+// DigestOrImageMatch matches the digest in the image name.
+func DigestOrImageMatch(image, imageID string) bool {
+	if d, err := ParseDigest(image); err == nil {
+		return strings.HasPrefix(d.Hex(), imageID) || strings.HasPrefix(image, imageID)
+	}
+	return strings.HasPrefix(image, imageID)
+}
+
+// ParseDockerImageReference parses a Docker pull spec string into a
+// DockerImageReference.
+func ParseDockerImageReference(spec string) (imagev1.DockerImageReference, error) {
+	ref, err := imagereference.Parse(spec)
+	if err != nil {
+		return imagev1.DockerImageReference{}, err
+	}
+	return imagev1.DockerImageReference{
+		Registry:  ref.Registry,
+		Namespace: ref.Namespace,
+		Name:      ref.Name,
+		Tag:       ref.Tag,
+		ID:        ref.ID,
+	}, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go
new file mode 100644
index 0000000000000..5c273e64047a7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go
@@ -0,0 +1,138 @@
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+const (
+	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
+	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm.
Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg Algorithm, h hash.Hash) Digest { + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) +} + +// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +func NewDigestFromHex(alg, hex string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, hex)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// ParseDigest parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func ParseDigest(s string) (Digest, error) { + d := Digest(s) + + return d, d.Validate() +} + +// FromReader returns the most valid digest for the underlying content using +// the canonical digest algorithm. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + + i := strings.Index(s, ":") + if i < 0 { + return ErrDigestInvalidFormat + } + + // case: "sha256:" with no hex. + if i+1 == len(s) { + return ErrDigestInvalidFormat + } + + switch algorithm := Algorithm(s[:i]); algorithm { + case SHA256, SHA384, SHA512: + if algorithm.Size()*2 != len(s[i+1:]) { + return ErrDigestInvalidLength + } + default: + return ErrDigestUnsupported + } + + return nil +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Hex returns the hex digest portion of the digest. This will panic if the +// underlying digest is not in a valid format. 
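+//
+// For example (editor's note, reusing the DigestSha256EmptyTar constant
+// defined above):
+//
+//	d := Digest(DigestSha256EmptyTar)
+//	d.Algorithm() // "sha256"
+//	d.Hex()       // "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"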
+func (d Digest) Hex() string { + return string(d[d.sepIndex()+1:]) +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic("could not find ':' in digest: " + d) + } + + return i +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go new file mode 100644 index 0000000000000..f3105a45b6977 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go @@ -0,0 +1,155 @@ +package digest + +import ( + "crypto" + "fmt" + "hash" + "io" +) + +// Algorithm identifies and implementation of a digester by an identifier. +// Note the that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, New and Hash will return nil. +func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +func (a Algorithm) String() string { + return string(a) +} + +// Size returns number of bytes returned by the hash. +func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + return nil +} + +// New returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling New. +func (a Algorithm) New() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). 
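+		//
+		// Editor's sketch of that guard:
+		//
+		//	if a.Available() {
+		//		h := a.Hash() // cannot panic once Available returned true
+		//		_ = h
+		//	}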
+ panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) + } + + return algorithms[a].New() +} + +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.New() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + +// TODO(stevvooe): Allow resolution of verifiers using the digest type and +// this registration system. + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go new file mode 100644 index 0000000000000..88f1774bb7ce8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go @@ -0,0 +1,5 @@ +// digest is a copy from "github.com/distribution/distribution/v3/digest" that is kept because we want to avoid the godep, +// this package has no non-standard dependencies, and if it changes lots of other docker registry stuff breaks. +// Don't try this at home! +// Changes here require sign-off from openshift/api-reviewers and they will be rejected. +package digest diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go new file mode 100644 index 0000000000000..7d06e325a3001 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go @@ -0,0 +1,5 @@ +// reference is a copy from "github.com/distribution/distribution/v3/reference" that is kept because we want to avoid the godep, +// this package has no non-standard dependencies, and if it changes lots of other docker registry stuff breaks. +// Don't try this at home! +// Changes here require sign-off from openshift/api-reviewers and they will be rejected. +package reference diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go new file mode 100644 index 0000000000000..32975f2b2fe61 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go @@ -0,0 +1,370 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. 
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+//	reference := name [ ":" tag ] [ "@" digest ]
+//	name := [hostname '/'] component ['/' component]*
+//	hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
+//	hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+//	port-number := /[0-9]+/
+//	component := alpha-numeric [separator alpha-numeric]*
+//	alpha-numeric := /[a-z0-9]+/
+//	separator := /[_.]|__|[-]*/
+//
+//	tag := /[\w][\w.-]{0,127}/
+//
+//	digest := digest-algorithm ":" digest-hex
+//	digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+//	digest-algorithm-separator := /[+.-_]/
+//	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+//	digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/openshift/library-go/pkg/image/internal/digest"
+)
+
+const (
+	// NameTotalLengthMax is the maximum total number of characters in a repository name.
+	NameTotalLengthMax = 255
+)
+
+var (
+	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
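+//
+// A minimal illustrative sketch (the pull spec is hypothetical):
+//
+//	var f Field
+//	if err := f.UnmarshalText([]byte("docker.io/library/busybox:latest")); err == nil {
+//		_ = f.Reference() // the parsed, typed reference
+//	}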
+func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +func SplitHostname(named Named) (string, string) { + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
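+//
+// A minimal illustrative sketch (names and tag are hypothetical):
+//
+//	named, _ := WithName("docker.io/library/busybox")
+//	tagged, _ := WithTag(named, "1.36")
+//	_ = tagged.String() // "docker.io/library/busybox:1.36"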
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + return repository(ref.Name()) +} + +func getBestReferenceType(ref reference) Reference { + if ref.name == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + name: ref.name, + digest: ref.digest, + } + } + return repository(ref.name) + } + if ref.digest == "" { + return taggedReference{ + name: ref.name, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + name string + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository string + +func (r repository) String() string { + return string(r) +} + +func (r repository) Name() string { + return string(r) +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return string(d) +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + name string + tag string +} + +func (t taggedReference) String() string { + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + name string + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go new file mode 100644 index 0000000000000..9a7d366bc8a8c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go @@ -0,0 +1,124 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines 
the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores and multiple
+	// dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// hostnameComponentRegexp restricts the registry hostname component of a
+	// repository name to start with a component as defined by hostnameRegexp
+	// and followed by an optional port.
+	hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// hostnameRegexp defines the structure of potential hostname components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	hostnameRegexp = expression(
+		hostnameComponentRegexp,
+		optional(repeated(literal(`.`), hostnameComponentRegexp)),
+		optional(literal(`:`), match(`[0-9]+`)))
+
+	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+	TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+	// anchoredTagRegexp matches valid tag names, anchored at the start and
+	// end of the matched string.
+	anchoredTagRegexp = anchored(TagRegexp)
+
+	// DigestRegexp matches valid digests.
+	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+	// anchoredDigestRegexp matches valid digests, anchored at the start and
+	// end of the matched string.
+	anchoredDigestRegexp = anchored(DigestRegexp)
+
+	// NameRegexp is the format for the name component of references. The
+	// regexp has capturing groups for the hostname and name part omitting
+	// the separating forward slash from either.
+	NameRegexp = expression(
+		optional(hostnameRegexp, literal(`/`)),
+		nameComponentRegexp,
+		optional(repeated(literal(`/`), nameComponentRegexp)))
+
+	// anchoredNameRegexp is used to parse a name value, capturing the
+	// hostname and trailing components.
+	anchoredNameRegexp = anchored(
+		optional(capture(hostnameRegexp), literal(`/`)),
+		capture(nameComponentRegexp,
+			optional(repeated(literal(`/`), nameComponentRegexp))))
+
+	// ReferenceRegexp is the full supported format of a reference. The regexp
+	// is anchored and has capturing groups for name, tag, and digest
+	// components.
+	ReferenceRegexp = anchored(capture(NameRegexp),
+		optional(literal(":"), capture(TagRegexp)),
+		optional(literal("@"), capture(DigestRegexp)))
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+	re := match(regexp.QuoteMeta(s))
+
+	if _, complete := re.LiteralPrefix(); !complete {
+		panic("must be a literal")
+	}
+
+	return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
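+//
+// For example (illustrative): expression(match(`a`), match(`b`)) compiles to
+// the regular expression /ab/.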
+func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go b/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go new file mode 100644 index 0000000000000..2547d7585f9ef --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go @@ -0,0 +1,257 @@ +package reference + +import ( + "net" + "net/url" + "strings" + + "github.com/openshift/library-go/pkg/image/internal/digest" + "github.com/openshift/library-go/pkg/image/internal/reference" +) + +// DockerImageReference points to a Docker image. +type DockerImageReference struct { + Registry string + Namespace string + Name string + Tag string + ID string +} + +const ( + // DockerDefaultRegistry is the value for the registry when none was provided. + DockerDefaultRegistry = "docker.io" + // DockerDefaultV1Registry is the host name of the default v1 registry + DockerDefaultV1Registry = "index." + DockerDefaultRegistry + // DockerDefaultV2Registry is the host name of the default v2 registry + DockerDefaultV2Registry = "registry-1." + DockerDefaultRegistry +) + +// Parse parses a Docker pull spec string into a +// DockerImageReference. +func Parse(spec string) (DockerImageReference, error) { + var ref DockerImageReference + + namedRef, err := reference.ParseNamed(spec) + if err != nil { + return ref, err + } + + name := namedRef.Name() + i := strings.IndexRune(name, '/') + + // if there are no path components, and it looks like a url (contains a .) or localhost, it's a registry + isRegistryOnly := i == -1 && (strings.ContainsAny(name, ".") || strings.HasPrefix(name, "localhost")) + + // if there are no path components, and it's not a registry, it's a name + isNameOnly := i == -1 && !isRegistryOnly + + // if there are path components, and the first component doesn't look like a url, it's a name + isNameOnly = isNameOnly || (i > -1 && (!strings.ContainsAny(name[:i], ":.") && name[:i] != "localhost")) + + if isRegistryOnly { + ref.Registry = namedRef.String() + } else if isNameOnly { + ref.Name = name + } else { + ref.Registry, ref.Name = name[:i], name[i+1:] + } + + if named, ok := namedRef.(reference.NamedTagged); !isRegistryOnly && ok { + ref.Tag = named.Tag() + } + + if named, ok := namedRef.(reference.Canonical); ok { + ref.ID = named.Digest().String() + } + + // It's not enough just to use the reference.ParseNamed(). 
We have to fill
+	// ref.Namespace from ref.Name
+	if i := strings.IndexRune(ref.Name, '/'); i != -1 {
+		ref.Namespace, ref.Name = ref.Name[:i], ref.Name[i+1:]
+	}
+
+	return ref, nil
+}
+
+// Equal returns true if the other DockerImageReference is equivalent to the
+// reference r. The comparison applies defaults to the Docker image reference,
+// so that e.g., "foobar" equals "docker.io/library/foobar:latest".
+func (r DockerImageReference) Equal(other DockerImageReference) bool {
+	defaultedRef := r.DockerClientDefaults()
+	otherDefaultedRef := other.DockerClientDefaults()
+	return defaultedRef == otherDefaultedRef
+}
+
+// DockerClientDefaults sets the default values used by the Docker client.
+func (r DockerImageReference) DockerClientDefaults() DockerImageReference {
+	if len(r.Registry) == 0 {
+		r.Registry = DockerDefaultRegistry
+	}
+	if len(r.Namespace) == 0 && IsRegistryDockerHub(r.Registry) {
+		r.Namespace = "library"
+	}
+	if len(r.Tag) == 0 {
+		r.Tag = "latest"
+	}
+	return r
+}
+
+// Minimal reduces a DockerImageReference to its minimalist form.
+func (r DockerImageReference) Minimal() DockerImageReference {
+	if r.Tag == "latest" {
+		r.Tag = ""
+	}
+	return r
+}
+
+// AsRepository returns the reference without tags or IDs.
+func (r DockerImageReference) AsRepository() DockerImageReference {
+	r.Tag = ""
+	r.ID = ""
+	return r
+}
+
+// RepositoryName returns the registry relative name
+func (r DockerImageReference) RepositoryName() string {
+	r.Tag = ""
+	r.ID = ""
+	r.Registry = ""
+	return r.Exact()
+}
+
+// RegistryHostPort returns the registry hostname and the port.
+// If the port is not specified in the registry hostname we default to 443.
+// This will also default to Docker client defaults if the registry hostname is empty.
+func (r DockerImageReference) RegistryHostPort(insecure bool) (string, string) {
+	registryHost := r.AsV2().DockerClientDefaults().Registry
+	if strings.Contains(registryHost, ":") {
+		hostname, port, _ := net.SplitHostPort(registryHost)
+		return hostname, port
+	}
+	if insecure {
+		return registryHost, "80"
+	}
+	return registryHost, "443"
+}
+
+// RegistryURL returns the URL of the registry specified by the reference.
+func (r DockerImageReference) RegistryURL() *url.URL {
+	return &url.URL{
+		Scheme: "https",
+		Host:   r.AsV2().Registry,
+	}
+}
+
+// DaemonMinimal clears defaults that Docker assumes.
+func (r DockerImageReference) DaemonMinimal() DockerImageReference {
+	switch r.Registry {
+	case DockerDefaultV1Registry, DockerDefaultV2Registry:
+		r.Registry = DockerDefaultRegistry
+	}
+	if IsRegistryDockerHub(r.Registry) && r.Namespace == "library" {
+		r.Namespace = ""
+	}
+	return r.Minimal()
+}
+
+func (r DockerImageReference) AsV2() DockerImageReference {
+	switch r.Registry {
+	case DockerDefaultV1Registry, DockerDefaultRegistry:
+		r.Registry = DockerDefaultV2Registry
+	}
+	return r
+}
+
+// MostSpecific returns the most specific image reference that can be constructed from the
+// current ref, preferring an ID over a Tag. Allows client code dealing with both tags and IDs
+// to get the most specific reference easily.
+func (r DockerImageReference) MostSpecific() DockerImageReference {
+	if len(r.ID) == 0 {
+		return r
+	}
+	if _, err := digest.ParseDigest(r.ID); err == nil {
+		r.Tag = ""
+		return r
+	}
+	if len(r.Tag) == 0 {
+		r.Tag, r.ID = r.ID, ""
+		return r
+	}
+	return r
+}
+
+// NameString returns the name of the reference with its tag or ID.
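+//
+// A minimal illustrative sketch (values are hypothetical):
+//
+//	ref := DockerImageReference{Name: "busybox", Tag: "latest"}
+//	_ = ref.NameString() // "busybox:latest"
+//
+//	byID := DockerImageReference{Name: "busybox", ID: "sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"}
+//	_ = byID.NameString() // "busybox@sha256:7173b809..."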
+func (r DockerImageReference) NameString() string { + switch { + case len(r.Name) == 0: + return "" + case len(r.ID) > 0: + var ref string + if _, err := digest.ParseDigest(r.ID); err == nil { + // if it parses as a digest, its v2 pull by id + ref = "@" + r.ID + } else { + // if it doesn't parse as a digest, it's presumably a v1 registry by-id tag + ref = ":" + r.ID + } + return r.Name + ref + case len(r.Tag) > 0: + return r.Name + ":" + r.Tag + default: + return r.Name + } +} + +// Exact returns a string representation of the set fields on the DockerImageReference +func (r DockerImageReference) Exact() string { + name := r.NameString() + if len(name) == 0 { + return name + } + s := r.Registry + if len(s) > 0 { + s += "/" + } + + if len(r.Namespace) != 0 { + s += r.Namespace + "/" + } + return s + name +} + +// String converts a DockerImageReference to a Docker pull spec (which implies a default namespace +// according to V1 Docker registry rules). Use Exact() if you want no defaulting. +func (r DockerImageReference) String() string { + if len(r.Namespace) == 0 && IsRegistryDockerHub(r.Registry) { + r.Namespace = "library" + } + return r.Exact() +} + +// IsRegistryDockerHub returns true if the given registry name belongs to +// Docker hub. +func IsRegistryDockerHub(registry string) bool { + switch registry { + case DockerDefaultRegistry, DockerDefaultV1Registry, DockerDefaultV2Registry: + return true + default: + return false + } +} + +// DeepCopyInto writing into out. in must be non-nil. +func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { + *out = *in + return +} + +// DeepCopy copies the receiver, creating a new DockerImageReference. +func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/library-go/pkg/monitor/health/metrics.go b/vendor/github.com/openshift/library-go/pkg/monitor/health/metrics.go new file mode 100644 index 0000000000000..8e4961bd5a9c2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/monitor/health/metrics.go @@ -0,0 +1,112 @@ +package health + +import ( + compbasemetrics "k8s.io/component-base/metrics" +) + +type registerables []compbasemetrics.Registerable + +var ( + healthyTargetsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "health_monitor_healthy_target_total", + Help: "Number of healthy instances registered with the health monitor. Partitioned by targets.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"target"}, + ) + + currentHealthyTargets = compbasemetrics.NewGauge( + &compbasemetrics.GaugeOpts{ + Name: "health_monitor_current_healthy_targets", + Help: "Number of currently healthy instances observed by the health monitor", + StabilityLevel: compbasemetrics.ALPHA, + }, + ) + + unHealthyTargetsTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "health_monitor_unhealthy_target_total", + Help: "Number of unhealthy instances registered with the health monitor. 
Partitioned by targets.",
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{"target"},
+	)
+
+	readyzViolationRequestTotal = compbasemetrics.NewCounterVec(
+		&compbasemetrics.CounterOpts{
+			Name:           "health_monitor_readyz_violation_request_total",
+			Help:           "Number of HTTP requests partitioned by status code and target that violate the readyz protocol.",
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{"code", "target"},
+	)
+
+	metrics = registerables{
+		healthyTargetsTotal,
+		currentHealthyTargets,
+		unHealthyTargetsTotal,
+		readyzViolationRequestTotal,
+	}
+)
+
+// HealthyTargetsTotal increments the total number of healthy instances observed by the health monitor
+func HealthyTargetsTotal(target string) {
+	healthyTargetsTotal.WithLabelValues(target).Add(1)
+}
+
+// CurrentHealthyTargets keeps track of the current number of healthy targets observed by the health monitor
+func CurrentHealthyTargets(count float64) {
+	currentHealthyTargets.Set(count)
+}
+
+// UnHealthyTargetsTotal increments the total number of unhealthy instances observed by the health monitor
+func UnHealthyTargetsTotal(target string) {
+	unHealthyTargetsTotal.WithLabelValues(target).Add(1)
+}
+
+// ReadyzProtocolRequestTotal increments the total number of requests issued by the health monitor that violate the "readyz" protocol
+//
+// the "readyz" protocol defines the following HTTP status codes:
+//
+// HTTP 200 - when the server operates normally
+// HTTP 500 - when the server is not ready, for example, is undergoing a shutdown
+func ReadyzProtocolRequestTotal(code, target string) {
+	readyzViolationRequestTotal.WithLabelValues(code, target).Add(1)
+}
+
+// Metrics specifies a set of methods that are used to register various metrics
+type Metrics struct {
+	// HealthyTargetsTotal increments the total number of healthy instances observed by the health monitor
+	HealthyTargetsTotal func(target string)
+
+	// CurrentHealthyTargets keeps track of the current number of healthy targets observed by the health monitor
+	CurrentHealthyTargets func(count float64)
+
+	// UnHealthyTargetsTotal increments the total number of unhealthy instances observed by the health monitor
+	UnHealthyTargetsTotal func(target string)
+
+	// ReadyzProtocolRequestTotal increments the total number of requests issued by the health monitor that violate the "readyz" protocol
+	//
+	// the "readyz" protocol defines the following HTTP status codes:
+	// HTTP 200 - when the server operates normally
+	// HTTP 500 - when the server is not ready, for example, is undergoing a shutdown
+	ReadyzProtocolRequestTotal func(code, target string)
+}
+
+// Register is a way to register the health monitor related metrics in the provided store
+func Register(registerFn func(...compbasemetrics.Registerable)) *Metrics {
+	registerFn(metrics...)
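+	// the returned struct simply exposes the package-level update functions
+	// whose collectors were registered with the provided store above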
+	return &Metrics{
+		HealthyTargetsTotal:        HealthyTargetsTotal,
+		CurrentHealthyTargets:      CurrentHealthyTargets,
+		UnHealthyTargetsTotal:      UnHealthyTargetsTotal,
+		ReadyzProtocolRequestTotal: ReadyzProtocolRequestTotal,
+	}
+}
+
+type noopMetrics struct{}
+
+func (noopMetrics) TargetsTotal(string)                 {}
+func (noopMetrics) TargetsGauge(float64)                {}
+func (noopMetrics) TargetsWithCodeTotal(string, string) {}
diff --git a/vendor/github.com/openshift/library-go/pkg/monitor/health/options.go b/vendor/github.com/openshift/library-go/pkg/monitor/health/options.go
new file mode 100644
index 0000000000000..b909b7d5ae4e6
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/monitor/health/options.go
@@ -0,0 +1,33 @@
+package health
+
+import "time"
+
+// WithUnHealthyProbesThreshold specifies consecutive failed health checks after which a target is considered unhealthy
+func (sm *Prober) WithUnHealthyProbesThreshold(unhealthyProbesThreshold int) *Prober {
+	sm.unhealthyProbesThreshold = unhealthyProbesThreshold
+	return sm
+}
+
+// WithHealthyProbesThreshold specifies consecutive successful health checks after which a target is considered healthy
+func (sm *Prober) WithHealthyProbesThreshold(healthyProbesThreshold int) *Prober {
+	sm.healthyProbesThreshold = healthyProbesThreshold
+	return sm
+}
+
+// WithProbeResponseTimeout specifies a time limit for requests made by the HTTP client for the health check
+func (sm *Prober) WithProbeResponseTimeout(probeResponseTimeout time.Duration) *Prober {
+	sm.client.Timeout = probeResponseTimeout
+	return sm
+}
+
+// WithProbeInterval specifies a time interval at which health checks are sent
+func (sm *Prober) WithProbeInterval(probeInterval time.Duration) *Prober {
+	sm.probeInterval = probeInterval
+	return sm
+}
+
+// WithMetrics specifies a set of methods that are used to register various metrics
+func (sm *Prober) WithMetrics(metrics *Metrics) *Prober {
+	sm.metrics = metrics
+	return sm
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/monitor/health/prober.go b/vendor/github.com/openshift/library-go/pkg/monitor/health/prober.go
new file mode 100644
index 0000000000000..cf805f7135de5
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/monitor/health/prober.go
@@ -0,0 +1,377 @@
+package health
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/transport"
+	"k8s.io/klog/v2"
+)
+
+var (
+	defaultProbeResponseTimeout = 1 * time.Second
+	defaultProbeInterval        = 2 * time.Second
+
+	defaultUnhealthyProbesThreshold = 3
+	defaultHealthyProbesThreshold   = 5
+)
+
+type Prober struct {
+	// targetProvider provides a list of targets to monitor;
+	// it also can schedule refreshing the list by simply calling the Enqueue method
+	targetProvider TargetProvider
+
+	// client is an HTTP client that is used to probe health checks for targets
+	client *http.Client
+
+	// probeInterval specifies a time interval at which health checks are sent
+	probeInterval time.Duration
+
+	// unhealthyProbesThreshold specifies consecutive failed health checks after which a target is considered unhealthy
+	unhealthyProbesThreshold int
+
+	// healthyProbesThreshold specifies consecutive successful health checks after which a target is considered healthy
+	healthyProbesThreshold int
+
+	healthyTargets   []string
+	unhealthyTargets []string
+	targetsToMonitor []string
+
+	consecutiveSuccessfulProbes map[string]int
+	consecutiveFailedProbes     map[string][]error
+
+	refreshTargetsLock sync.Mutex
+	refreshTargets     bool
+
+	// exportedHealthyTargets holds a copy of healthyTargets
+	exportedHealthyTargets atomic.Value
+
+	// exportedUnhealthyTargets holds a copy of unhealthyTargets
+	exportedUnhealthyTargets atomic.Value
+
+	// listeners holds a list of interested parties to be notified when the list of healthy targets changes
+	listeners []Listener
+
+	// metrics specifies a set of methods that are used to register various metrics
+	metrics *Metrics
+}
+
+var _ Listener = &Prober{}
+var _ Notifier = &Prober{}
+
+// New creates a health monitor that periodically sends requests to the provided targets to check their health.
+//
+// The following methods allow you to configure the behaviour of the monitor after creation.
+//
+// WithUnHealthyProbesThreshold - specifies consecutive failed health checks after which a target is considered unhealthy
+// the default value is: 3
+//
+// WithHealthyProbesThreshold - specifies consecutive successful health checks after which a target is considered healthy
+// the default value is: 5
+//
+// WithProbeResponseTimeout - specifies a time limit for requests made by the HTTP client for the health check
+// the default value is: 1 second
+//
+// WithProbeInterval - specifies a time interval at which health checks are sent
+// the default value is: 2 seconds
+//
+// WithMetrics - specifies a set of methods that are used to register various metrics
+// the default value is: no metrics
+//
+// Additionally, the monitor implements the Listener and Notifier interfaces.
+//
+// The health monitor automatically registers for notifications if the provided target provider also implements the Notifier interface.
+// It is implicit so that the provider can provide a static or a dynamic list of targets.
+//
+// Interested parties can register a listener for notifications about healthy/unhealthy target changes via AddListener.
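+//
+// A minimal usage sketch (illustrative; the target address, restConfig and ctx are assumptions):
+//
+//	prober, err := New(StaticTargetProvider{"10.0.0.1:6443"}, restConfig)
+//	if err != nil {
+//		return err
+//	}
+//	go prober.Run(ctx)
+//	healthy, unhealthy := prober.Targets()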
+// TODO: instead of restConfig we could accept transport so that it is reused instead of creating a new connection to targets +// +// reusing the transport has the advantage of using the same connection as other clients +func New(targetProvider TargetProvider, restConfig *rest.Config) (*Prober, error) { + client, err := createHealthCheckHTTPClient(defaultProbeResponseTimeout, restConfig) + if err != nil { + return nil, err + } + + hm := &Prober{ + client: client, + targetProvider: targetProvider, + targetsToMonitor: targetProvider.CurrentTargetsList(), + probeInterval: defaultProbeInterval, + unhealthyProbesThreshold: defaultUnhealthyProbesThreshold, + healthyProbesThreshold: defaultHealthyProbesThreshold, + + consecutiveSuccessfulProbes: map[string]int{}, + consecutiveFailedProbes: map[string][]error{}, + + metrics: &Metrics{ + HealthyTargetsTotal: noopMetrics{}.TargetsTotal, + CurrentHealthyTargets: noopMetrics{}.TargetsGauge, + UnHealthyTargetsTotal: noopMetrics{}.TargetsTotal, + ReadyzProtocolRequestTotal: noopMetrics{}.TargetsWithCodeTotal, + }, + } + hm.exportedHealthyTargets.Store([]string{}) + hm.exportedUnhealthyTargets.Store([]string{}) + + if notifier, ok := targetProvider.(Notifier); ok { + notifier.AddListener(hm) + } + + return hm, nil +} + +// Run starts monitoring the provided targets until stop channel is closed +// This method is blocking and it is meant to be launched in a separate goroutine +func (sm *Prober) Run(ctx context.Context) { + defer utilruntime.HandleCrash() + + klog.Infof("Starting the health monitor with Interval = %v, Timeout = %v, HealthyThreshold = %v, UnhealthyThreshold = %v ", sm.probeInterval, sm.client.Timeout, sm.healthyProbesThreshold, sm.unhealthyProbesThreshold) + defer klog.Info("Shutting down the health monitor") + + wait.Until(sm.healthCheckRegisteredTargets, sm.probeInterval, ctx.Done()) +} + +// Enqueue schedules refreshing the target list on the next probeInterval +// This method is used by the TargetProvider to notify that the list has changed +func (sm *Prober) Enqueue() { + sm.refreshTargetsLock.Lock() + defer sm.refreshTargetsLock.Unlock() + sm.refreshTargets = true +} + +// Targets returns a list of healthy and unhealthy targets +func (sm *Prober) Targets() ([]string, []string) { + return sm.exportedHealthyTargets.Load().([]string), sm.exportedUnhealthyTargets.Load().([]string) +} + +// AddListener adds a listener to be notified when the list of healthy targets changes +// +// Note: +// this method is not thread safe and mustn't be called after calling StartMonitoring() method +func (sm *Prober) AddListener(listener Listener) { + sm.listeners = append(sm.listeners, listener) +} + +type targetErrTuple struct { + target string + err error +} + +// refreshTargetsLocked updates the internal targets list to monitor if it was requested (via the Enqueue method) +func (sm *Prober) refreshTargetsLocked() { + sm.refreshTargetsLock.Lock() + defer sm.refreshTargetsLock.Unlock() + if !sm.refreshTargets { + return + } + + sm.refreshTargets = false + freshTargets := sm.targetProvider.CurrentTargetsList() + freshTargetSet := sets.New(freshTargets...) + + currentTargetsSet := sets.New(sm.targetsToMonitor...) 
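+	// diff the fresh list against the list we were already monitoring so that
+	// state for removed targets can be dropped and newly added targets announced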
+	newTargetsToMonitorSet := freshTargetSet.Difference(currentTargetsSet)
+	if newTargetsToMonitorSet.Len() > 0 {
+		klog.V(2).Infof("health monitor observed new targets = %v", sets.List(newTargetsToMonitorSet))
+	}
+
+	removedTargetsToMonitorSet := currentTargetsSet.Difference(freshTargetSet)
+	if removedTargetsToMonitorSet.Len() > 0 {
+		klog.V(2).Infof("health monitor will stop checking the following targets = %v", sets.List(removedTargetsToMonitorSet))
+		for targetToRemove := range removedTargetsToMonitorSet {
+			delete(sm.consecutiveSuccessfulProbes, targetToRemove)
+			delete(sm.consecutiveFailedProbes, targetToRemove)
+		}
+
+		healthyTargetsSet := sets.New(sm.healthyTargets...)
+		healthyTargetsSet.Delete(removedTargetsToMonitorSet.UnsortedList()...)
+		sm.healthyTargets = sets.List(healthyTargetsSet)
+
+		unhealthyTargetsSet := sets.New(sm.unhealthyTargets...)
+		unhealthyTargetsSet.Delete(removedTargetsToMonitorSet.UnsortedList()...)
+		sm.unhealthyTargets = sets.List(unhealthyTargetsSet)
+	}
+
+	sm.targetsToMonitor = freshTargets
+}
+
+func (sm *Prober) healthCheckRegisteredTargets() {
+	sm.refreshTargetsLocked()
+	var wg sync.WaitGroup
+	resTargetErrTupleCh := make(chan targetErrTuple, len(sm.targetsToMonitor))
+
+	for i := 0; i < len(sm.targetsToMonitor); i++ {
+		wg.Add(1)
+		go func(target string) {
+			defer wg.Done()
+			err := sm.healthCheckSingleTarget(target)
+			resTargetErrTupleCh <- targetErrTuple{target, err}
+		}(sm.targetsToMonitor[i])
+	}
+	wg.Wait()
+	close(resTargetErrTupleCh)
+
+	currentHealthCheckProbes := make([]targetErrTuple, 0, len(sm.targetsToMonitor))
+	for svrErrTuple := range resTargetErrTupleCh {
+		currentHealthCheckProbes = append(currentHealthCheckProbes, svrErrTuple)
+	}
+
+	sm.updateHealthChecksFor(currentHealthCheckProbes)
+}
+
+// updateHealthChecksFor examines the health of targets based on the provided probes and the current configuration.
+// It also notifies interested parties about changes in the health condition.
+// Interested parties can be registered by calling the AddListener method.
+func (sm *Prober) updateHealthChecksFor(currentHealthCheckProbes []targetErrTuple) {
+	newUnhealthyTargets := []string{}
+	newHealthyTargets := []string{}
+
+	for _, svrErrTuple := range currentHealthCheckProbes {
+		if svrErrTuple.err != nil {
+			delete(sm.consecutiveSuccessfulProbes, svrErrTuple.target)
+
+			unhealthyProbesSlice := sm.consecutiveFailedProbes[svrErrTuple.target]
+			if len(unhealthyProbesSlice) < sm.unhealthyProbesThreshold {
+				unhealthyProbesSlice = append(unhealthyProbesSlice, svrErrTuple.err)
+				sm.consecutiveFailedProbes[svrErrTuple.target] = unhealthyProbesSlice
+				if len(unhealthyProbesSlice) == sm.unhealthyProbesThreshold {
+					newUnhealthyTargets = append(newUnhealthyTargets, svrErrTuple.target)
+				}
+			}
+			continue
+		}
+
+		delete(sm.consecutiveFailedProbes, svrErrTuple.target)
+
+		healthyProbesCounter := sm.consecutiveSuccessfulProbes[svrErrTuple.target]
+		if healthyProbesCounter < sm.healthyProbesThreshold {
+			healthyProbesCounter++
+			sm.consecutiveSuccessfulProbes[svrErrTuple.target] = healthyProbesCounter
+			if healthyProbesCounter == sm.healthyProbesThreshold {
+				newHealthyTargets = append(newHealthyTargets, svrErrTuple.target)
+			}
+		}
+	}
+
+	newUnhealthyTargetsSet := sets.New(newUnhealthyTargets...)
+	newHealthyTargetsSet := sets.New(newHealthyTargets...)
+	notifyListeners := false
+
+	// detect unhealthy targets
+	previouslyUnhealthyTargetsSet := sets.New(sm.unhealthyTargets...)
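+	// a target ends up in the unhealthy set if it was unhealthy before or just
+	// crossed the failure threshold, unless it just crossed the success threshold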
+ currentlyUnhealthyTargetsSet := previouslyUnhealthyTargetsSet.Union(newUnhealthyTargetsSet) + currentlyUnhealthyTargetsSet.Delete(newHealthyTargetsSet.UnsortedList()...) + if !currentlyUnhealthyTargetsSet.Equal(previouslyUnhealthyTargetsSet) { + sm.unhealthyTargets = sets.List(currentlyUnhealthyTargetsSet) + klog.V(2).Infof("observed the following unhealthy targets %v", sm.unhealthyTargets) + logUnhealthyTargets(sm.unhealthyTargets, currentHealthCheckProbes) + + exportedUnhealthyTargets := make([]string, len(sm.unhealthyTargets)) + for index, unhealthyTarget := range sm.unhealthyTargets { + exportedUnhealthyTargets[index] = unhealthyTarget + sm.metrics.UnHealthyTargetsTotal(unhealthyTarget) + } + sm.exportedUnhealthyTargets.Store(exportedUnhealthyTargets) + notifyListeners = true + } + + // detect healthy targets + previouslyHealthyTargetsSet := sets.New(sm.healthyTargets...) + currentlyHealthyTargetsSet := previouslyHealthyTargetsSet.Union(newHealthyTargetsSet) + currentlyHealthyTargetsSet.Delete(newUnhealthyTargetsSet.UnsortedList()...) + if !currentlyHealthyTargetsSet.Equal(previouslyHealthyTargetsSet) { + sm.healthyTargets = sets.List(currentlyHealthyTargetsSet) + klog.V(2).Infof("observed the following healthy targets %v", sm.healthyTargets) + + exportedHealthyTargets := make([]string, len(sm.healthyTargets)) + for index, healthyTarget := range sm.healthyTargets { + exportedHealthyTargets[index] = healthyTarget + sm.metrics.HealthyTargetsTotal(healthyTarget) + } + sm.exportedHealthyTargets.Store(exportedHealthyTargets) + notifyListeners = true + } + + if notifyListeners { + // something has changed update the currently healthy targets metric + sm.metrics.CurrentHealthyTargets(float64(len(sm.healthyTargets))) + + // notify listeners about the new healthy/unhealthy targets + for _, listener := range sm.listeners { + listener.Enqueue() + } + } +} + +func (sm *Prober) healthCheckSingleTarget(target string) error { + // TODO: make the protocol, port and the path configurable + targetURL, err := url.Parse(fmt.Sprintf("https://%s/%s", target, "readyz")) + if err != nil { + return err + } + newReq, err := http.NewRequest("GET", targetURL.String(), nil) + if err != nil { + return err + } + + resp, err := sm.client.Do(newReq) + if err != nil { + sm.metrics.ReadyzProtocolRequestTotal("", target) + return err + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + if resp.StatusCode != http.StatusInternalServerError { + sm.metrics.ReadyzProtocolRequestTotal(strconv.Itoa(resp.StatusCode), target) + } + return fmt.Errorf("bad status from %v: %v, expected HTTP 200", targetURL.String(), resp.StatusCode) + } + + return err +} + +func createHealthCheckHTTPClient(responseTimeout time.Duration, restConfig *rest.Config) (*http.Client, error) { + transportConfig, err := restConfig.TransportConfig() + if err != nil { + return nil, err + } + + tlsConfig, err := transport.TLSConfigFor(transportConfig) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: utilnet.SetTransportDefaults(&http.Transport{ + TLSClientConfig: tlsConfig, + }), + Timeout: responseTimeout, + } + + return client, nil +} + +func logUnhealthyTargets(unhealthyTargets []string, currentHealthCheckProbes []targetErrTuple) { + for _, unhealthyTarget := range unhealthyTargets { + errorsForUnhealthyTarget := []error{} + for _, svrErrTuple := range currentHealthCheckProbes { + if svrErrTuple.target == unhealthyTarget { + errorsForUnhealthyTarget = append(errorsForUnhealthyTarget, svrErrTuple.err) + } + } + 
klog.V(2).Infof("the following target %v became unhealthy due to %v", unhealthyTarget, utilerrors.NewAggregate(errorsForUnhealthyTarget).Error()) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/monitor/health/target_resolver.go b/vendor/github.com/openshift/library-go/pkg/monitor/health/target_resolver.go new file mode 100644 index 0000000000000..7bb8ae5274613 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/monitor/health/target_resolver.go @@ -0,0 +1,28 @@ +package health + +// Listener is an interface to use to notify interested parties of a change. +type Listener interface { + // Enqueue should be called when an input may have changed + Enqueue() +} + +// Notifier is a way to add listeners +type Notifier interface { + // AddListener is adds a listener to be notified of potential input changes + AddListener(listener Listener) +} + +// TargetProviders is an interface to use to get a list of targets to monitor +type TargetProvider interface { + // CurrentTargetsList returns a precomputed list of targets + CurrentTargetsList() []string +} + +// StaticTargetProvider implements TargetProvider and provides a static list of targets +type StaticTargetProvider []string + +var _ TargetProvider = StaticTargetProvider{} + +func (sp StaticTargetProvider) CurrentTargetsList() []string { + return sp +} diff --git a/vendor/github.com/openshift/library-go/pkg/network/dialer.go b/vendor/github.com/openshift/library-go/pkg/network/dialer.go new file mode 100644 index 0000000000000..f19be44a3e994 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/network/dialer.go @@ -0,0 +1,13 @@ +package network + +import ( + "context" + "net" +) + +type DialContext func(ctx context.Context, network, address string) (net.Conn, error) + +// DefaultDialContext returns a DialContext function from a network dialer with default options sets. +func DefaultClientDialContext() DialContext { + return dialerWithDefaultOptions() +} diff --git a/vendor/github.com/openshift/library-go/pkg/network/dialer_linux.go b/vendor/github.com/openshift/library-go/pkg/network/dialer_linux.go new file mode 100644 index 0000000000000..f75c740ee5b9c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/network/dialer_linux.go @@ -0,0 +1,94 @@ +//go:build linux +// +build linux + +package network + +import ( + "net" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +func dialerWithDefaultOptions() DialContext { + nd := &net.Dialer{ + // TCP_USER_TIMEOUT does affect the behaviour of connect() which is controlled by this field so we set it to the same value + Timeout: 25 * time.Second, + // KeepAlive must to be set to a negative value to stop std library from applying the default values + // by doing so we ensure that the options we are interested in won't be overwritten + KeepAlive: time.Duration(-1), + Control: func(network, address string, con syscall.RawConn) error { + var errs []error + err := con.Control(func(fd uintptr) { + optionsErr := setDefaultSocketOptions(int(fd)) + if optionsErr != nil { + errs = append(errs, optionsErr) + } + }) + if err != nil { + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) + }, + } + return nd.DialContext +} + +// setDefaultSocketOptions sets custom socket options so that we can detect connections to an unhealthy (dead) peer quickly. 
+// In particular we set TCP_USER_TIMEOUT that specifies the maximum amount of time that transmitted data may remain
+// unacknowledged before TCP will forcibly close the connection.
+//
+// Note
+// TCP_USER_TIMEOUT can't be too low because a single dropped packet might drop the entire connection.
+// Ideally it should be set to: TCP_KEEPIDLE + TCP_KEEPINTVL * TCP_KEEPCNT
+func setDefaultSocketOptions(fd int) error {
+	// specifies the maximum amount of time in milliseconds that transmitted data may remain
+	// unacknowledged before TCP will forcibly close the corresponding connection and return ETIMEDOUT to the application
+	tcpUserTimeoutInMilliSeconds := int(25 * time.Second / time.Millisecond)
+
+	// specifies the interval at which probes are sent in seconds
+	tcpKeepIntvl := int(roundDuration(5*time.Second, time.Second))
+
+	// specifies the threshold for sending the first KEEP ALIVE probe in seconds
+	tcpKeepIdle := int(roundDuration(2*time.Second, time.Second))
+
+	// enable keep-alive probes
+	if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, 1); err != nil {
+		return wrapSyscallError("setsockopt", err)
+	}
+
+	if err := syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutInMilliSeconds); err != nil {
+		return wrapSyscallError("setsockopt", err)
+	}
+
+	if err := syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, tcpKeepIntvl); err != nil {
+		return wrapSyscallError("setsockopt", err)
+	}
+
+	if err := syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, tcpKeepIdle); err != nil {
+		return wrapSyscallError("setsockopt", err)
+	}
+	return nil
+}
+
+// roundDuration rounds d up to the next multiple of to.
+//
+// note that it was copied from the std library
+func roundDuration(d time.Duration, to time.Duration) time.Duration {
+	return (d + to - 1) / to
+}
+
+// wrapSyscallError takes an error and a syscall name. If the error is
+// a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
+//
+// note that it was copied from the std library
+func wrapSyscallError(name string, err error) error {
+	if _, ok := err.(syscall.Errno); ok {
+		err = os.NewSyscallError(name, err)
+	}
+	return err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/network/dialer_others.go b/vendor/github.com/openshift/library-go/pkg/network/dialer_others.go
new file mode 100644
index 0000000000000..815636d1c764a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/network/dialer_others.go
@@ -0,0 +1,20 @@
+//go:build !linux
+// +build !linux
+
+package network
+
+import (
+	"net"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+func dialerWithDefaultOptions() DialContext {
+	klog.V(2).Info("Creating the default network Dialer (unsupported platform). It may take up to 15 minutes to detect broken connections and establish a new one")
+	nd := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}
+	return nd.DialContext
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go
new file mode 100644
index 0000000000000..713a404208f73
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go
@@ -0,0 +1,32 @@
+package oauthdiscovery
+
+// OauthAuthorizationServerMetadata holds OAuth 2.0 Authorization Server Metadata used for discovery
+// https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+type OauthAuthorizationServerMetadata struct {
+	// The authorization server's issuer identifier, which is a URL that uses the https scheme and has no query or fragment components.
+	// This is the location where .well-known RFC 5785 [RFC5785] resources containing information about the authorization server are published.
+	Issuer string `json:"issuer"`
+
+	// URL of the authorization server's authorization endpoint [RFC6749].
+	AuthorizationEndpoint string `json:"authorization_endpoint"`
+
+	// URL of the authorization server's token endpoint [RFC6749].
+	TokenEndpoint string `json:"token_endpoint"`
+
+	// JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this authorization server supports.
+	// Servers MAY choose not to advertise some supported scope values even when this parameter is used.
+	ScopesSupported []string `json:"scopes_supported"`
+
+	// JSON array containing a list of the OAuth 2.0 response_type values that this authorization server supports.
+	// The array values used are the same as those used with the response_types parameter defined by "OAuth 2.0 Dynamic Client Registration Protocol" [RFC7591].
+	ResponseTypesSupported []string `json:"response_types_supported"`
+
+	// JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports.
+	// The array values used are the same as those used with the grant_types parameter defined by "OAuth 2.0 Dynamic Client Registration Protocol" [RFC7591].
+	GrantTypesSupported []string `json:"grant_types_supported"`
+
+	// JSON array containing a list of PKCE [RFC7636] code challenge methods supported by this authorization server.
+	// Code challenge method values are used in the "code_challenge_method" parameter defined in Section 4.3 of [RFC7636].
+	// The valid code challenge method values are those registered in the IANA "PKCE Code Challenge Methods" registry [IANA.OAuth.Parameters].
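+	// For example (illustrative): ["plain", "S256"].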
+ CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"` +} diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go new file mode 100644 index 0000000000000..2539d4a391679 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go @@ -0,0 +1,37 @@ +package oauthdiscovery + +import ( + "path" + "strings" +) + +const ( + AuthorizePath = "/authorize" + TokenPath = "/token" + InfoPath = "/info" + + RequestTokenEndpoint = "/token/request" + DisplayTokenEndpoint = "/token/display" + ImplicitTokenEndpoint = "/token/implicit" +) + +const OpenShiftOAuthAPIPrefix = "/oauth" + +func OpenShiftOAuthAuthorizeURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, AuthorizePath) +} +func OpenShiftOAuthTokenURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, TokenPath) +} +func OpenShiftOAuthTokenRequestURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, RequestTokenEndpoint) +} +func OpenShiftOAuthTokenDisplayURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, DisplayTokenEndpoint) +} +func OpenShiftOAuthTokenImplicitURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, ImplicitTokenEndpoint) +} +func openShiftOAuthURL(masterAddr, oauthEndpoint string) string { + return strings.TrimRight(masterAddr, "/") + path.Join(OpenShiftOAuthAPIPrefix, oauthEndpoint) +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go new file mode 100644 index 0000000000000..fc88dc807504f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go @@ -0,0 +1,409 @@ +package clusterquotamapping + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + kapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + quotav1 "github.com/openshift/api/quota/v1" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + quotalister "github.com/openshift/client-go/quota/listers/quota/v1" +) + +// Look out, here there be dragons! +// There is a race when dealing with the DeltaFifo compression used to back a reflector for a controller that uses two +// SharedInformers for both their watch events AND their caches. The scenario looks like this +// +// 1. Add, Delete a namespace really fast, *before* the add is observed by the controller using the reflector. +// 2. Add or Update a quota that matches the Add namespace +// 3. The cache had the intermediate state for the namespace for some period of time. This makes the quota update the mapping indicating a match. +// 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared. +// +// This sounds pretty bad, however, we fail in the "safe" direction and the consequences are detectable. +// When going from quota to namespace, you can get back a namespace that doesn't exist. 
diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go new file mode 100644 index 0000000000000..fc88dc807504f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go @@ -0,0 +1,409 @@ +package clusterquotamapping + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + kapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + quotav1 "github.com/openshift/api/quota/v1" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + quotalister "github.com/openshift/client-go/quota/listers/quota/v1" +) + +// Look out, here there be dragons! +// There is a race when dealing with the DeltaFifo compression used to back a reflector for a controller that uses two +// SharedInformers for both their watch events AND their caches. The scenario looks like this: +// +// 1. Add, Delete a namespace really fast, *before* the add is observed by the controller using the reflector. +// 2. Add or Update a quota that matches the Add namespace +// 3. The cache had the intermediate state for the namespace for some period of time. This makes the quota update the mapping indicating a match. +// 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared. +// +// This sounds pretty bad, however, we fail in the "safe" direction and the consequences are detectable. +// When going from quota to namespace, you can get back a namespace that doesn't exist. There are no resources in a nonexistent +// namespace, so you know to clear all referenced resources. In addition, this add/delete has to happen so fast +// that it would be nearly impossible for any resources to be created. If you do create resources, then we must be observing +// their deletes. When quota is replenished, we'll see that we need to clear any charges. +// +// When going from namespace to quota, you can get back a quota that doesn't exist. Since the cache is shared, +// we know that a missing quota means that there isn't anything for us to bill against, so we can skip it. +// +// If the mapping cache is wrong and a previously deleted quota or namespace is created, this controller +// correctly adds the items back to the list and clears out all previous mappings. +// +// In addition to those constraints, the timing threshold for actually hitting this problem is really tight. It's +// basically a script that is creating and deleting things as fast as it possibly can. Sub-millisecond in the fuzz +// test where I caught the problem. + +// NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas +func NewClusterQuotaMappingController(namespaceInformer corev1informers.NamespaceInformer, quotaInformer quotainformer.ClusterResourceQuotaInformer) *ClusterQuotaMappingController { + c := newClusterQuotaMappingController(namespaceInformer.Informer(), quotaInformer) + c.namespaceLister = v1NamespaceLister{lister: namespaceInformer.Lister()} + return c +} + +type namespaceLister interface { + Each(label labels.Selector, fn func(metav1.Object) bool) error + Get(name string) (metav1.Object, error) +} + +type v1NamespaceLister struct { + lister corev1listers.NamespaceLister +} + +func (l v1NamespaceLister) Each(label labels.Selector, fn func(metav1.Object) bool) error { + results, err := l.lister.List(label) + if err != nil { + return err + } + for i := range results { + if !fn(results[i]) { + return nil + } + } + return nil +} +func (l v1NamespaceLister) Get(name string) (metav1.Object, error) { + return l.lister.Get(name) +} + +func newClusterQuotaMappingController(namespaceInformer cache.SharedIndexInformer, quotaInformer quotainformer.ClusterResourceQuotaInformer) *ClusterQuotaMappingController { + c := &ClusterQuotaMappingController{ + namespaceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_clusterquotamappingcontroller_namespaces"), + quotaQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_clusterquotamappingcontroller_clusterquotas"), + clusterQuotaMapper: NewClusterQuotaMapper(), + } + namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addNamespace, + UpdateFunc: c.updateNamespace, + DeleteFunc: c.deleteNamespace, + }) + c.namespacesSynced = namespaceInformer.HasSynced + + quotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addQuota, + UpdateFunc: c.updateQuota, + DeleteFunc: c.deleteQuota, + }) + c.quotaLister = quotaInformer.Lister() + c.quotasSynced = quotaInformer.Informer().HasSynced + + return c +} + +type ClusterQuotaMappingController struct { + namespaceQueue workqueue.RateLimitingInterface + namespaceLister namespaceLister + namespacesSynced func() bool + + quotaQueue workqueue.RateLimitingInterface + quotaLister quotalister.ClusterResourceQuotaLister + quotasSynced func() bool + + clusterQuotaMapper *clusterQuotaMapper +} + +func (c *ClusterQuotaMappingController)
GetClusterQuotaMapper() ClusterQuotaMapper { + return c.clusterQuotaMapper +} + +func (c *ClusterQuotaMappingController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.namespaceQueue.ShutDown() + defer c.quotaQueue.ShutDown() + + klog.Infof("Starting ClusterQuotaMappingController controller") + defer klog.Infof("Shutting down ClusterQuotaMappingController controller") + + if !cache.WaitForCacheSync(stopCh, c.namespacesSynced, c.quotasSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return + } + + klog.V(4).Infof("Starting workers for quota mapping controller workers") + for i := 0; i < workers; i++ { + go wait.Until(c.namespaceWorker, time.Second, stopCh) + go wait.Until(c.quotaWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *ClusterQuotaMappingController) syncQuota(quota *quotav1.ClusterResourceQuota) error { + matcherFunc, err := GetObjectMatcher(quota.Spec.Selector) + if err != nil { + return err + } + + if err := c.namespaceLister.Each(labels.Everything(), func(obj metav1.Object) bool { + // attempt to set the mapping. The quotas never collide with each other (same quota is never processed twice in parallel) + // so this means that the project we have is out of date, pull a more recent copy from the cache and retest + for { + matches, err := matcherFunc(obj) + if err != nil { + utilruntime.HandleError(err) + break + } + success, quotaMatches, _ := c.clusterQuotaMapper.setMapping(quota, obj, !matches) + if success { + break + } + + // if the quota is mismatched, then someone has updated the quota or has deleted the entry entirely. + // if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this + // execution is finished + if !quotaMatches { + return false + } + newer, err := c.namespaceLister.Get(obj.GetName()) + if kapierrors.IsNotFound(err) { + // if the namespace is gone, then the deleteNamespace path will be called, just continue + break + } + if err != nil { + utilruntime.HandleError(err) + break + } + obj = newer + } + return true + }); err != nil { + return err + } + + c.clusterQuotaMapper.completeQuota(quota) + return nil +} + +func (c *ClusterQuotaMappingController) syncNamespace(namespace metav1.Object) error { + allQuotas, err1 := c.quotaLister.List(labels.Everything()) + if err1 != nil { + return err1 + } + for i := range allQuotas { + quota := allQuotas[i] + + for { + matcherFunc, err := GetObjectMatcher(quota.Spec.Selector) + if err != nil { + utilruntime.HandleError(err) + break + } + + // attempt to set the mapping. The namespaces never collide with each other (same namespace is never processed twice in parallel) + // so this means that the quota we have is out of date, pull a more recent copy from the cache and retest + matches, err := matcherFunc(namespace) + if err != nil { + utilruntime.HandleError(err) + break + } + success, _, namespaceMatches := c.clusterQuotaMapper.setMapping(quota, namespace, !matches) + if success { + break + } + + // if the namespace is mismatched, then someone has updated the namespace or has deleted the entry entirely. + // if we've been updated, we'll be rekicked, if we've been deleted we should stop. 
Either way, this + // execution is finished + if !namespaceMatches { + return nil + } + + quota, err = c.quotaLister.Get(quota.Name) + if kapierrors.IsNotFound(err) { + // if the quota is gone, then the deleteQuota path will be called, just continue + break + } + if err != nil { + utilruntime.HandleError(err) + break + } + } + } + + c.clusterQuotaMapper.completeNamespace(namespace) + return nil +} + +func (c *ClusterQuotaMappingController) quotaWork() bool { + key, quit := c.quotaQueue.Get() + if quit { + return true + } + defer c.quotaQueue.Done(key) + + quota, err := c.quotaLister.Get(key.(string)) + if err != nil { + if errors.IsNotFound(err) { + c.quotaQueue.Forget(key) + return false + } + utilruntime.HandleError(err) + return false + } + + err = c.syncQuota(quota) + outOfRetries := c.quotaQueue.NumRequeues(key) > 5 + switch { + case err != nil && outOfRetries: + utilruntime.HandleError(err) + c.quotaQueue.Forget(key) + + case err != nil && !outOfRetries: + c.quotaQueue.AddRateLimited(key) + + default: + c.quotaQueue.Forget(key) + } + + return false +} + +func (c *ClusterQuotaMappingController) quotaWorker() { + for { + if quit := c.quotaWork(); quit { + return + } + } +} + +func (c *ClusterQuotaMappingController) namespaceWork() bool { + key, quit := c.namespaceQueue.Get() + if quit { + return true + } + defer c.namespaceQueue.Done(key) + + namespace, err := c.namespaceLister.Get(key.(string)) + if kapierrors.IsNotFound(err) { + c.namespaceQueue.Forget(key) + return false + } + if err != nil { + utilruntime.HandleError(err) + return false + } + + err = c.syncNamespace(namespace) + outOfRetries := c.namespaceQueue.NumRequeues(key) > 5 + switch { + case err != nil && outOfRetries: + utilruntime.HandleError(err) + c.namespaceQueue.Forget(key) + + case err != nil && !outOfRetries: + c.namespaceQueue.AddRateLimited(key) + + default: + c.namespaceQueue.Forget(key) + } + + return false +} + +func (c *ClusterQuotaMappingController) namespaceWorker() { + for { + if quit := c.namespaceWork(); quit { + return + } + } +} + +func (c *ClusterQuotaMappingController) deleteNamespace(obj interface{}) { + var name string + switch ns := obj.(type) { + case cache.DeletedFinalStateUnknown: + switch nested := ns.Obj.(type) { + case *corev1.Namespace: + name = nested.Name + default: + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %T", ns.Obj)) + return + } + case *corev1.Namespace: + name = ns.Name + default: + utilruntime.HandleError(fmt.Errorf("not a Namespace %v", obj)) + return + } + c.clusterQuotaMapper.removeNamespace(name) +} + +func (c *ClusterQuotaMappingController) addNamespace(cur interface{}) { + c.enqueueNamespace(cur) +} +func (c *ClusterQuotaMappingController) updateNamespace(old, cur interface{}) { + c.enqueueNamespace(cur) +} +func (c *ClusterQuotaMappingController) enqueueNamespace(obj interface{}) { + switch ns := obj.(type) { + case *corev1.Namespace: + if !c.clusterQuotaMapper.requireNamespace(ns) { + return + } + default: + utilruntime.HandleError(fmt.Errorf("not a Namespace %v", obj)) + return + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + c.namespaceQueue.Add(key) +} + +func (c *ClusterQuotaMappingController) deleteQuota(obj interface{}) { + quota, ok1 := obj.(*quotav1.ClusterResourceQuota) + if !ok1 { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %v", obj)) + return 
+ } + quota, ok = tombstone.Obj.(*quotav1.ClusterResourceQuota) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Quota %v", obj)) + return + } + } + + c.clusterQuotaMapper.removeQuota(quota.Name) +} + +func (c *ClusterQuotaMappingController) addQuota(cur interface{}) { + c.enqueueQuota(cur) +} +func (c *ClusterQuotaMappingController) updateQuota(old, cur interface{}) { + c.enqueueQuota(cur) +} +func (c *ClusterQuotaMappingController) enqueueQuota(obj interface{}) { + quota, ok := obj.(*quotav1.ClusterResourceQuota) + if !ok { + utilruntime.HandleError(fmt.Errorf("not a Quota %v", obj)) + return + } + if !c.clusterQuotaMapper.requireQuota(quota) { + return + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(quota) + if err != nil { + utilruntime.HandleError(err) + return + } + c.quotaQueue.Add(key) +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go new file mode 100644 index 0000000000000..0c2c2ae7a5c3c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go @@ -0,0 +1,139 @@ +package clusterquotamapping + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func GetResourceQuotasStatusByNamespace(namespaceStatuses quotav1.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.Status, true + } + } + return corev1.ResourceQuotaStatus{}, false +} + +func RemoveResourceQuotasStatusByNamespace(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func InsertResourceQuotasStatus(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, newStatus quotav1.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} + +var accessor = meta.NewAccessor() + +func GetMatcher(selector quotav1.ClusterResourceQuotaSelector) (func(obj runtime.Object) (bool, error), error) { + var labelSelector labels.Selector + if selector.LabelSelector != nil { + var err error + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + } + + var annotationSelector map[string]string + if len(selector.AnnotationSelector) > 0 { + // ensure our matcher has a stable copy of the map + annotationSelector = make(map[string]string, len(selector.AnnotationSelector)) + for k, v 
:= range selector.AnnotationSelector { + annotationSelector[k] = v + } + } + + return func(obj runtime.Object) (bool, error) { + if labelSelector != nil { + objLabels, err := accessor.Labels(obj) + if err != nil { + return false, err + } + if !labelSelector.Matches(labels.Set(objLabels)) { + return false, nil + } + } + + if annotationSelector != nil { + objAnnotations, err := accessor.Annotations(obj) + if err != nil { + return false, err + } + for k, v := range annotationSelector { + if objValue, exists := objAnnotations[k]; !exists || objValue != v { + return false, nil + } + } + } + + return true, nil + }, nil +} + +func GetObjectMatcher(selector quotav1.ClusterResourceQuotaSelector) (func(obj metav1.Object) (bool, error), error) { + var labelSelector labels.Selector + if selector.LabelSelector != nil { + var err error + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + } + + var annotationSelector map[string]string + if len(selector.AnnotationSelector) > 0 { + // ensure our matcher has a stable copy of the map + annotationSelector = make(map[string]string, len(selector.AnnotationSelector)) + for k, v := range selector.AnnotationSelector { + annotationSelector[k] = v + } + } + + return func(obj metav1.Object) (bool, error) { + if labelSelector != nil { + if !labelSelector.Matches(labels.Set(obj.GetLabels())) { + return false, nil + } + } + + if annotationSelector != nil { + objAnnotations := obj.GetAnnotations() + for k, v := range annotationSelector { + if objValue, exists := objAnnotations[k]; !exists || objValue != v { + return false, nil + } + } + } + + return true, nil + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go new file mode 100644 index 0000000000000..db9572425a38a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go @@ -0,0 +1,289 @@ +package clusterquotamapping + +import ( + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + quotav1 "github.com/openshift/api/quota/v1" +) + +type ClusterQuotaMapper interface { + // GetClusterQuotasFor returns the list of clusterquota names that this namespace matches. It also + // returns the selectionFields associated with the namespace for the check so that callers can determine staleness + GetClusterQuotasFor(namespaceName string) ([]string, SelectionFields) + // GetNamespacesFor returns the list of namespace names that this cluster quota matches. It also + // returns the selector associated with the clusterquota for the check so that callers can determine staleness + GetNamespacesFor(quotaName string) ([]string, quotav1.ClusterResourceQuotaSelector) + + AddListener(listener MappingChangeListener) +} + +// MappingChangeListener is notified of changes to the mapping. It must not block. +type MappingChangeListener interface { + AddMapping(quotaName, namespaceName string) + RemoveMapping(quotaName, namespaceName string) +} + +type SelectionFields struct { + Labels map[string]string + Annotations map[string]string +} + +// clusterQuotaMapper gives thread safe access to the actual mappings that are being stored. +// Many methods use a shareable read lock to check status followed by a non-shareable +// write lock which double checks the condition before proceeding. Since locks aren't escalatable, +// you have to perform the recheck because someone could have beaten you to it.
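//
// A stripped-down sketch of that check/recheck idiom (the registry type and
// names are illustrative, not part of this package):
//
//	func (r *registry) setIfChanged(key, value string) bool {
//		r.lock.RLock() // fast path under the shared read lock
//		current, exists := r.items[key]
//		r.lock.RUnlock()
//		if exists && current == value {
//			return false // nothing to do
//		}
//		r.lock.Lock() // slow path: exclusive lock, then recheck, because
//		defer r.lock.Unlock() // a writer may have won the race in between
//		if current, exists := r.items[key]; exists && current == value {
//			return false
//		}
//		r.items[key] = value
//		return true
//	}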
+type clusterQuotaMapper struct { + lock sync.RWMutex + + // requiredQuotaToSelector indicates the latest label selector this controller has observed for a quota + requiredQuotaToSelector map[string]quotav1.ClusterResourceQuotaSelector + // requiredNamespaceToLabels indicates the latest selectionFields this controller has observed for a namespace + requiredNamespaceToLabels map[string]SelectionFields + // completedQuotaToSelector indicates the latest label selector this controller has scanned against namespaces + completedQuotaToSelector map[string]quotav1.ClusterResourceQuotaSelector + // completedNamespaceToLabels indicates the latest selectionFields this controller has scanned against cluster quotas + completedNamespaceToLabels map[string]SelectionFields + + quotaToNamespaces map[string]sets.Set[string] + namespaceToQuota map[string]sets.Set[string] + + listeners []MappingChangeListener +} + +func NewClusterQuotaMapper() *clusterQuotaMapper { + return &clusterQuotaMapper{ + requiredQuotaToSelector: map[string]quotav1.ClusterResourceQuotaSelector{}, + requiredNamespaceToLabels: map[string]SelectionFields{}, + completedQuotaToSelector: map[string]quotav1.ClusterResourceQuotaSelector{}, + completedNamespaceToLabels: map[string]SelectionFields{}, + + quotaToNamespaces: map[string]sets.Set[string]{}, + namespaceToQuota: map[string]sets.Set[string]{}, + } +} + +func (m *clusterQuotaMapper) GetClusterQuotasFor(namespaceName string) ([]string, SelectionFields) { + m.lock.RLock() + defer m.lock.RUnlock() + + quotas, ok := m.namespaceToQuota[namespaceName] + if !ok { + return []string{}, m.completedNamespaceToLabels[namespaceName] + } + return sets.List(quotas), m.completedNamespaceToLabels[namespaceName] +} + +func (m *clusterQuotaMapper) GetNamespacesFor(quotaName string) ([]string, quotav1.ClusterResourceQuotaSelector) { + m.lock.RLock() + defer m.lock.RUnlock() + + namespaces, ok := m.quotaToNamespaces[quotaName] + if !ok { + return []string{}, m.completedQuotaToSelector[quotaName] + } + return sets.List(namespaces), m.completedQuotaToSelector[quotaName] +} + +func (m *clusterQuotaMapper) AddListener(listener MappingChangeListener) { + m.lock.Lock() + defer m.lock.Unlock() + + m.listeners = append(m.listeners, listener) +} + +// requireQuota updates the selector requirements for the given quota. This prevents stale updates to the mapping itself. +// returns true if a modification was made +func (m *clusterQuotaMapper) requireQuota(quota *quotav1.ClusterResourceQuota) bool { + m.lock.RLock() + selector, exists := m.requiredQuotaToSelector[quota.Name] + m.lock.RUnlock() + + if selectorMatches(selector, exists, quota) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + selector, exists = m.requiredQuotaToSelector[quota.Name] + if selectorMatches(selector, exists, quota) { + return false + } + + m.requiredQuotaToSelector[quota.Name] = quota.Spec.Selector + return true +} + +// completeQuota updates the latest selector used to generate the mappings for this quota.
The value is returned +// by the Get methods for the mapping so that callers can determine staleness +func (m *clusterQuotaMapper) completeQuota(quota *quotav1.ClusterResourceQuota) { + m.lock.Lock() + defer m.lock.Unlock() + m.completedQuotaToSelector[quota.Name] = quota.Spec.Selector +} + +// removeQuota deletes a quota from all mappings +func (m *clusterQuotaMapper) removeQuota(quotaName string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.requiredQuotaToSelector, quotaName) + delete(m.completedQuotaToSelector, quotaName) + delete(m.quotaToNamespaces, quotaName) + for namespaceName, quotas := range m.namespaceToQuota { + if quotas.Has(quotaName) { + quotas.Delete(quotaName) + for _, listener := range m.listeners { + listener.RemoveMapping(quotaName, namespaceName) + } + } + } +} + +// requireNamespace updates the label requirements for the given namespace. This prevents stale updates to the mapping itself. +// returns true if a modification was made +func (m *clusterQuotaMapper) requireNamespace(namespace metav1.Object) bool { + m.lock.RLock() + selectionFields, exists := m.requiredNamespaceToLabels[namespace.GetName()] + m.lock.RUnlock() + + if selectionFieldsMatch(selectionFields, exists, namespace) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + selectionFields, exists = m.requiredNamespaceToLabels[namespace.GetName()] + if selectionFieldsMatch(selectionFields, exists, namespace) { + return false + } + + m.requiredNamespaceToLabels[namespace.GetName()] = GetSelectionFields(namespace) + return true +} + +// completeNamespace updates the latest selectionFields used to generate the mappings for this namespace. The value is returned +// by the Get methods for the mapping so that callers can determine staleness +func (m *clusterQuotaMapper) completeNamespace(namespace metav1.Object) { + m.lock.Lock() + defer m.lock.Unlock() + m.completedNamespaceToLabels[namespace.GetName()] = GetSelectionFields(namespace) +} + +// removeNamespace deletes a namespace from all mappings +func (m *clusterQuotaMapper) removeNamespace(namespaceName string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.requiredNamespaceToLabels, namespaceName) + delete(m.completedNamespaceToLabels, namespaceName) + delete(m.namespaceToQuota, namespaceName) + for quotaName, namespaces := range m.quotaToNamespaces { + if namespaces.Has(namespaceName) { + namespaces.Delete(namespaceName) + for _, listener := range m.listeners { + listener.RemoveMapping(quotaName, namespaceName) + } + } + } +} + +func selectorMatches(selector quotav1.ClusterResourceQuotaSelector, exists bool, quota *quotav1.ClusterResourceQuota) bool { + return exists && equality.Semantic.DeepEqual(selector, quota.Spec.Selector) +} +func selectionFieldsMatch(selectionFields SelectionFields, exists bool, namespace metav1.Object) bool { + return exists && reflect.DeepEqual(selectionFields, GetSelectionFields(namespace)) +} + +// setMapping maps (or removes a mapping) between a clusterquota and a namespace +// It returns whether the action worked, whether the quota is out of date, whether the namespace is out of date +// This allows callers to decide whether to pull new information from the cache or simply skip execution +func (m *clusterQuotaMapper) setMapping(quota *quotav1.ClusterResourceQuota, namespace metav1.Object, remove bool) (bool /*added*/, bool /*quota matches*/, bool /*namespace matches*/) { + m.lock.RLock() + selector, selectorExists := m.requiredQuotaToSelector[quota.Name] + selectionFields, selectionFieldsExist := 
m.requiredNamespaceToLabels[namespace.GetName()] + m.lock.RUnlock() + + if !selectorMatches(selector, selectorExists, quota) { + return false, false, selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) + } + if !selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) { + return false, true, false + } + + m.lock.Lock() + defer m.lock.Unlock() + selector, selectorExists = m.requiredQuotaToSelector[quota.Name] + selectionFields, selectionFieldsExist = m.requiredNamespaceToLabels[namespace.GetName()] + if !selectorMatches(selector, selectorExists, quota) { + return false, false, selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) + } + if !selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) { + return false, true, false + } + + if remove { + mutated := false + + namespaces, ok := m.quotaToNamespaces[quota.Name] + if !ok { + m.quotaToNamespaces[quota.Name] = sets.Set[string]{} + } else { + mutated = namespaces.Has(namespace.GetName()) + namespaces.Delete(namespace.GetName()) + } + + quotas, ok := m.namespaceToQuota[namespace.GetName()] + if !ok { + m.namespaceToQuota[namespace.GetName()] = sets.Set[string]{} + } else { + mutated = mutated || quotas.Has(quota.Name) + quotas.Delete(quota.Name) + } + + if mutated { + for _, listener := range m.listeners { + listener.RemoveMapping(quota.Name, namespace.GetName()) + } + } + + return true, true, true + } + + mutated := false + + namespaces, ok := m.quotaToNamespaces[quota.Name] + if !ok { + mutated = true + m.quotaToNamespaces[quota.Name] = sets.New(namespace.GetName()) + } else { + mutated = !namespaces.Has(namespace.GetName()) + namespaces.Insert(namespace.GetName()) + } + + quotas, ok := m.namespaceToQuota[namespace.GetName()] + if !ok { + mutated = true + m.namespaceToQuota[namespace.GetName()] = sets.New(quota.Name) + } else { + mutated = mutated || !quotas.Has(quota.Name) + quotas.Insert(quota.Name) + } + + if mutated { + for _, listener := range m.listeners { + listener.AddMapping(quota.Name, namespace.GetName()) + } + } + + return true, true, true + +} + +func GetSelectionFields(namespace metav1.Object) SelectionFields { + return SelectionFields{Labels: namespace.GetLabels(), Annotations: namespace.GetAnnotations()} +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go new file mode 100644 index 0000000000000..14faf6bc025dc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go @@ -0,0 +1,42 @@ +package quotautil + +import ( + "strings" + + apierrs "k8s.io/apimachinery/pkg/api/errors" +) + +// errMessageString is a part of error message copied from quotaAdmission.Admit() method in +// k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go module +const errQuotaMessageString = `exceeded quota:` +const errQuotaUnknownMessageString = `status unknown for quota:` +const errLimitsMessageString = `exceeds the maximum limit` + +// IsErrorQuotaExceeded returns true if the given error stands for a denied request caused by detected quota +// abuse. 
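//
// A hypothetical caller-side sketch (the pod client and error wrapping are
// illustrative, not part of this package): quota exhaustion is actionable by
// the user rather than by retrying, so callers typically branch on it.
//
//	if _, err := pods.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
//		if quotautil.IsErrorQuotaExceeded(err) {
//			return fmt.Errorf("namespace quota exhausted: %w", err)
//		}
//		return err
//	}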
+func IsErrorQuotaExceeded(err error) bool { + if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) { + lowered := strings.ToLower(err.Error()) + // the limit error message can be accompanied only by Invalid reason + if strings.Contains(lowered, errLimitsMessageString) { + return true + } + // the quota error message can be accompanied only by Forbidden reason + if isForbidden && (strings.Contains(lowered, errQuotaMessageString) || strings.Contains(lowered, errQuotaUnknownMessageString)) { + return true + } + } + return false +} + +// IsErrorLimitExceeded returns true if the given error is a limit error. +func IsErrorLimitExceeded(err error) bool { + if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) { + lowered := strings.ToLower(err.Error()) + // the limit error message can be accompanied only by Invalid reason + if strings.Contains(lowered, errLimitsMessageString) { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go new file mode 100644 index 0000000000000..a6bfc6269ed6d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go @@ -0,0 +1,48 @@ +package quotautil + +import ( + corev1 "k8s.io/api/core/v1" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func GetResourceQuotasStatusByNamespace(namespaceStatuses quotav1.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.Status, true + } + } + return corev1.ResourceQuotaStatus{}, false +} + +func RemoveResourceQuotasStatusByNamespace(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func InsertResourceQuotasStatus(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, newStatus quotav1.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} diff --git a/vendor/github.com/openshift/library-go/pkg/route/OWNERS b/vendor/github.com/openshift/library-go/pkg/route/OWNERS new file mode 100644 index 0000000000000..e79f31e9610f7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/route/OWNERS @@ -0,0 +1,14 @@ +approvers: + - frobware + - knobunc + - Miciah + - candita +reviewers: + - frobware + - knobunc + - Miciah + - candita + - alebedev87 +emeritus_approvers: + - smarterclayton +component: Routing diff --git a/vendor/github.com/openshift/library-go/pkg/route/common.go b/vendor/github.com/openshift/library-go/pkg/route/common.go new file mode 100644 index 0000000000000..bd378da01c617 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/route/common.go @@ -0,0 +1,23 @@ +package route + +import ( + "context" + + authorizationv1 "k8s.io/api/authorization/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SubjectAccessReviewCreator is an interface for performing subject access reviews +type SubjectAccessReviewCreator interface { + Create(ctx context.Context, sar *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) +} + +// RouteValidationOptions used to tweak how/what fields are validated. These +// options are propagated by the apiserver. +type RouteValidationOptions struct { + + // AllowExternalCertificates option is set when the RouteExternalCertificate + // feature gate is enabled. + AllowExternalCertificates bool +} diff --git a/vendor/github.com/openshift/library-go/pkg/route/defaulting/defaults.go b/vendor/github.com/openshift/library-go/pkg/route/defaulting/defaults.go new file mode 100644 index 0000000000000..4e0eeb459b25d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/route/defaulting/defaults.go @@ -0,0 +1,45 @@ +package defaulting + +import ( + v1 "github.com/openshift/api/route/v1" +) + +// If adding or changing route defaults, updates may be required to +// pkg/router/controller/controller.go to ensure the routes generated from +// ingress resources will match routes created via the api. + +func SetDefaults_RouteSpec(obj *v1.RouteSpec) { + if len(obj.WildcardPolicy) == 0 { + obj.WildcardPolicy = v1.WildcardPolicyNone + } +} + +func SetDefaults_RouteTargetReference(obj *v1.RouteTargetReference) { + if len(obj.Kind) == 0 { + obj.Kind = "Service" + } + if obj.Weight == nil { + obj.Weight = new(int32) + *obj.Weight = 100 + } +} + +func SetDefaults_TLSConfig(obj *v1.TLSConfig) { + if len(obj.Termination) == 0 && len(obj.DestinationCACertificate) == 0 { + obj.Termination = v1.TLSTerminationEdge + } + switch obj.Termination { + case v1.TLSTerminationType("Reencrypt"): + obj.Termination = v1.TLSTerminationReencrypt + case v1.TLSTerminationType("Edge"): + obj.Termination = v1.TLSTerminationEdge + case v1.TLSTerminationType("Passthrough"): + obj.Termination = v1.TLSTerminationPassthrough + } +} + +func SetDefaults_RouteIngress(obj *v1.RouteIngress) { + if len(obj.WildcardPolicy) == 0 { + obj.WildcardPolicy = v1.WildcardPolicyNone + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go new file mode 100644 index 0000000000000..81cd5a656d1c8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go @@ -0,0 +1,267 @@ +package hostassignment + +import ( + "context" + "fmt" + + authorizationv1 "k8s.io/api/authorization/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/endpoints/request" + + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/authorization/authorizationutil" + "github.com/openshift/library-go/pkg/route" +) + +const ( + // HostGeneratedAnnotationKey is the key for an annotation set to "true" + // if the route's host was generated. + HostGeneratedAnnotationKey = "openshift.io/host.generated" + // routeHostPermissionErrMsg is the error message for trying to set a + // route's spec.host field without the required permission. 
+ routeHostPermissionErrMsg = "you do not have permission to set the host field of the route" + // routeSubdomainPermissionErrMsg is the error message for trying to set + // a route's spec.subdomain field without the required permission. + routeSubdomainPermissionErrMsg = "you do not have permission to set the subdomain field of the route" + // routeTLSPermissionErrMsg is the error message for trying to set a + // route's spec.tls field or subfields without the required permission. + routeTLSPermissionErrMsg = "you do not have permission to set certificate fields on the route" +) + +type HostnameGenerator interface { + GenerateHostname(*routev1.Route) (string, error) +} + +// AllocateHost allocates a host name ONLY if the route doesn't specify a subdomain wildcard policy and +// the host name on the route is empty and an allocator is configured. +// It must first allocate the shard and may return an error if shard allocation fails. +func AllocateHost(ctx context.Context, route *routev1.Route, sarc route.SubjectAccessReviewCreator, routeAllocator HostnameGenerator, opts route.RouteValidationOptions) field.ErrorList { + hostSet := len(route.Spec.Host) > 0 + certSet := route.Spec.TLS != nil && + (len(route.Spec.TLS.CACertificate) > 0 || + len(route.Spec.TLS.Certificate) > 0 || + len(route.Spec.TLS.DestinationCACertificate) > 0 || + len(route.Spec.TLS.Key) > 0) + + if opts.AllowExternalCertificates && route.Spec.TLS != nil && route.Spec.TLS.ExternalCertificate != nil { + certSet = certSet || len(route.Spec.TLS.ExternalCertificate.Name) > 0 + } + + if hostSet || certSet { + user, ok := request.UserFrom(ctx) + if !ok { + return field.ErrorList{field.InternalError(field.NewPath("spec", "host"), fmt.Errorf("unable to verify host field can be set"))} + } + res, err := sarc.Create( + ctx, + authorizationutil.AddUserToSAR( + user, + &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Namespace: request.NamespaceValue(ctx), + Verb: "create", + Group: routev1.GroupName, + Resource: "routes", + Subresource: "custom-host", + }, + }, + }, + ), + metav1.CreateOptions{}, + ) + if err != nil { + return field.ErrorList{field.InternalError(field.NewPath("spec", "host"), err)} + } + if !res.Status.Allowed { + if hostSet { + return field.ErrorList{field.Forbidden(field.NewPath("spec", "host"), routeHostPermissionErrMsg)} + } + return field.ErrorList{field.Forbidden(field.NewPath("spec", "tls"), routeTLSPermissionErrMsg)} + } + } + + if route.Spec.WildcardPolicy == routev1.WildcardPolicySubdomain { + // Don't allocate a host if subdomain wildcard policy. 
+ return nil + } + + if len(route.Spec.Subdomain) == 0 && len(route.Spec.Host) == 0 && routeAllocator != nil { + // TODO: this does not belong here, and should be removed + host, err := routeAllocator.GenerateHostname(route) + if err != nil { + return field.ErrorList{field.InternalError(field.NewPath("spec", "host"), fmt.Errorf("allocation error: %v for route: %#v", err, route))} + } + route.Spec.Host = host + if route.Annotations == nil { + route.Annotations = map[string]string{} + } + route.Annotations[HostGeneratedAnnotationKey] = "true" + } + return nil +} + +func hasCertificateInfo(tls *routev1.TLSConfig, opts route.RouteValidationOptions) bool { + if tls == nil { + return false + } + hasInfo := len(tls.Certificate) > 0 || + len(tls.Key) > 0 || + len(tls.CACertificate) > 0 || + len(tls.DestinationCACertificate) > 0 + + if opts.AllowExternalCertificates && tls.ExternalCertificate != nil { + hasInfo = hasInfo || len(tls.ExternalCertificate.Name) > 0 + } + return hasInfo +} + +// certificateChangeRequiresAuth determines whether changes to the TLS certificate configuration require authentication. +// Note: If (newer/updated) route uses externalCertificate, this function always returns true, as we cannot definitively verify if +// the content of the referenced secret has been modified. Even if the secret name remains the same, +// we must assume that the secret content is changed, necessitating authorization. +func certificateChangeRequiresAuth(route, older *routev1.Route, opts route.RouteValidationOptions) bool { + switch { + case route.Spec.TLS != nil && older.Spec.TLS != nil: + a, b := route.Spec.TLS, older.Spec.TLS + if !hasCertificateInfo(a, opts) { + // removing certificate info is allowed + return false + } + + certChanged := a.CACertificate != b.CACertificate || + a.Certificate != b.Certificate || + a.DestinationCACertificate != b.DestinationCACertificate || + a.Key != b.Key + + if opts.AllowExternalCertificates { + if route.Spec.TLS.ExternalCertificate != nil { + certChanged = true + } + } + + return certChanged + case route.Spec.TLS != nil: + // using any default certificate is allowed + return hasCertificateInfo(route.Spec.TLS, opts) + default: + // all other cases we are not adding additional certificate info + return false + } +} + +// validateImmutableField is equivalent to apimachinery.ValidateImmutableField +// except that it uses a custom error message. +func validateImmutableField(newVal, oldVal interface{}, fldPath *field.Path, errMsg string) field.ErrorList { + allErrs := field.ErrorList{} + if !apiequality.Semantic.DeepEqual(oldVal, newVal) { + allErrs = append(allErrs, field.Invalid(fldPath, newVal, errMsg)) + } + return allErrs +} + +// ValidateHostUpdate checks if the user has the correct permissions based on the updates +// done to the route object. If the route's host/subdomain has been updated it checks if +// the user has "update" permission on custom-host subresource. If only the certificate +// has changed, it checks if the user has "create" permission on the custom-host subresource. +// +// Which means "update" permission is required to change host/subdomain and +// either "create" or "update" permission is required to change certificate. +// Removing certificate info is allowed without any permission. +// https://github.com/openshift/origin/pull/18177#issuecomment-360660024. 
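//
// Summarized (non-normative), the checks below work out to:
//
//	spec.host or spec.subdomain changed -> requires "update" on routes/custom-host
//	certificate material changed        -> requires "update", or failing that "create", on routes/custom-host
//	certificate material removed        -> no custom-host permission required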
+// +// Caveat here is that if the (newer/updated) route uses externalCertificate, +// the certChanged condition will always be true (even when the secret name remains unchanged), +// since we cannot verify state of external secret object. +// Due to this it proceeds with the assumption that the certificate has changed +// when the route has externalCertificate set. +func ValidateHostUpdate(ctx context.Context, route, older *routev1.Route, sarc route.SubjectAccessReviewCreator, opts route.RouteValidationOptions) field.ErrorList { + hostChanged := route.Spec.Host != older.Spec.Host + subdomainChanged := route.Spec.Subdomain != older.Spec.Subdomain + certChanged := certificateChangeRequiresAuth(route, older, opts) + if !hostChanged && !certChanged && !subdomainChanged { + return nil + } + user, ok := request.UserFrom(ctx) + if !ok { + return field.ErrorList{field.InternalError(field.NewPath("spec", "host"), fmt.Errorf("unable to verify host field can be changed"))} + } + res, err := sarc.Create( + ctx, + authorizationutil.AddUserToSAR( + user, + &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Namespace: request.NamespaceValue(ctx), + Verb: "update", + Group: routev1.GroupName, + Resource: "routes", + Subresource: "custom-host", + }, + }, + }, + ), + metav1.CreateOptions{}, + ) + if err != nil { + if subdomainChanged { + return field.ErrorList{field.InternalError(field.NewPath("spec", "subdomain"), err)} + } + return field.ErrorList{field.InternalError(field.NewPath("spec", "host"), err)} + } + if !res.Status.Allowed { + if hostChanged { + return field.ErrorList{field.Invalid(field.NewPath("spec", "host"), route.Spec.Host, routeHostPermissionErrMsg)} + } + if subdomainChanged { + return field.ErrorList{field.Invalid(field.NewPath("spec", "subdomain"), route.Spec.Subdomain, routeSubdomainPermissionErrMsg)} + } + + // if tls is being updated without host being updated, we check if 'create' permission exists on custom-host subresource + res, err := sarc.Create( + ctx, + authorizationutil.AddUserToSAR( + user, + &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Namespace: request.NamespaceValue(ctx), + Verb: "create", + Group: routev1.GroupName, + Resource: "routes", + Subresource: "custom-host", + }, + }, + }, + ), + metav1.CreateOptions{}, + ) + if err != nil { + return field.ErrorList{field.InternalError(field.NewPath("spec", "host"), err)} + } + if !res.Status.Allowed { + if route.Spec.TLS == nil || older.Spec.TLS == nil { + return validateImmutableField(route.Spec.TLS, older.Spec.TLS, field.NewPath("spec", "tls"), routeTLSPermissionErrMsg) + } + errs := validateImmutableField(route.Spec.TLS.CACertificate, older.Spec.TLS.CACertificate, field.NewPath("spec", "tls", "caCertificate"), routeTLSPermissionErrMsg) + errs = append(errs, validateImmutableField(route.Spec.TLS.Certificate, older.Spec.TLS.Certificate, field.NewPath("spec", "tls", "certificate"), routeTLSPermissionErrMsg)...) + errs = append(errs, validateImmutableField(route.Spec.TLS.DestinationCACertificate, older.Spec.TLS.DestinationCACertificate, field.NewPath("spec", "tls", "destinationCACertificate"), routeTLSPermissionErrMsg)...) + errs = append(errs, validateImmutableField(route.Spec.TLS.Key, older.Spec.TLS.Key, field.NewPath("spec", "tls", "key"), routeTLSPermissionErrMsg)...) 
+ + if opts.AllowExternalCertificates { + if route.Spec.TLS.ExternalCertificate == nil || older.Spec.TLS.ExternalCertificate == nil { + errs = append(errs, validateImmutableField(route.Spec.TLS.ExternalCertificate, older.Spec.TLS.ExternalCertificate, field.NewPath("spec", "tls", "externalCertificate"), routeTLSPermissionErrMsg)...) + } else { + // since the state of the external secret cannot be verified, return error (even when secret name remains unchanged) + // without performing immutability checks, if externalCertificate is set. + errs = append(errs, field.Invalid(field.NewPath("spec", "tls", "externalCertificate"), route.Spec.TLS.ExternalCertificate, routeTLSPermissionErrMsg)) + } + } + return errs + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/route/hostassignment/plugin.go b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/plugin.go new file mode 100644 index 0000000000000..e936bc66c4871 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/plugin.go @@ -0,0 +1,46 @@ +package hostassignment + +import ( + "fmt" + "strings" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog/v2" + + routev1 "github.com/openshift/api/route/v1" +) + +// Default DNS suffix to use if no configuration is passed to this plugin. +const defaultDNSSuffix = "router.default.svc.cluster.local" + +// SimpleAllocationPlugin implements the route.AllocationPlugin interface +// to provide a simple unsharded (or single sharded) allocation plugin. +type SimpleAllocationPlugin struct { + DNSSuffix string +} + +// NewSimpleAllocationPlugin creates a new SimpleAllocationPlugin. +func NewSimpleAllocationPlugin(suffix string) (*SimpleAllocationPlugin, error) { + if len(suffix) == 0 { + suffix = defaultDNSSuffix + } + + klog.V(4).Infof("Route plugin initialized with suffix=%s", suffix) + + // Check that the DNS suffix is valid. + if len(kvalidation.IsDNS1123Subdomain(suffix)) != 0 { + return nil, fmt.Errorf("invalid DNS suffix: %s", suffix) + } + + return &SimpleAllocationPlugin{DNSSuffix: suffix}, nil +} + +// GenerateHostname generates a host name for a route - using the service name, +// namespace (if provided) and the router shard dns suffix. +// TODO: move to router code, and have the routers set this back on the route status. 
+func (p *SimpleAllocationPlugin) GenerateHostname(route *routev1.Route) (string, error) { + if len(route.Name) == 0 || len(route.Namespace) == 0 { + return "", nil + } + return fmt.Sprintf("%s-%s.%s", strings.Replace(route.Name, ".", "-", -1), route.Namespace, p.DNSSuffix), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go b/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go new file mode 100644 index 0000000000000..a3896006f9540 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go @@ -0,0 +1,533 @@ +package validation + +import ( + "context" + "fmt" + "regexp" + "slices" + "strings" + + authorizationv1 "k8s.io/api/authorization/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/authentication/user" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + routev1 "github.com/openshift/api/route/v1" + "github.com/openshift/library-go/pkg/authorization/authorizationutil" + routecommon "github.com/openshift/library-go/pkg/route" +) + +const ( + // maxHeaderNameSize is the maximum allowed length of an HTTP header + // name. + maxHeaderNameSize = 255 + // maxHeaderValueSize is the maximum allowed length of an HTTP header + // value. + maxHeaderValueSize = 16384 + // maxResponseHeaderList is the maximum allowed number of HTTP response + // header actions. + maxResponseHeaderList = 20 + // maxRequestHeaderList is the maximum allowed number of HTTP request + // header actions. + maxRequestHeaderList = 20 + // permittedHeaderNameErrorMessage is the API validation message for an + // invalid HTTP header name. + permittedHeaderNameErrorMessage = "name must be a valid HTTP header name as defined in RFC 2616 section 4.2" + // permittedHeaderValueTemplate is used in the definitions of + // permittedRequestHeaderValueRE and permittedResponseHeaderValueRE. + // Any changes made to these regex patterns must be reflected in the + // corresponding regexps in + // https://github.com/openshift/api/blob/master/route/v1/types.go and + // https://github.com/openshift/api/blob/master/operator/v1/types_ingress.go + // for the Route.spec.httpHeaders.actions[*].response, + // Route.spec.httpHeaders.actions[*].request, + // IngressController.spec.httpHeaders.actions[*].response, and + // IngressController.spec.httpHeaders.actions[*].request fields for the + // benefit of client-side validation. + permittedHeaderValueTemplate = `^(?:%(?:%|(?:\{[-+]?[QXE](?:,[-+]?[QXE])*\})?\[(?:XYZ\.hdr\([0-9A-Za-z-]+\)|ssl_c_der)(?:,(?:lower|base64))*\])|[^%[:cntrl:]])+$` + // permittedRequestHeaderValueErrorMessage is the API validation message + // for an invalid HTTP request header value. + permittedRequestHeaderValueErrorMessage = "Either header value provided is not in correct format or the converter specified is not allowed. The dynamic header value may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2 Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." 
+ // permittedResponseHeaderValueErrorMessage is the API validation + // message for an invalid HTTP response header value. + permittedResponseHeaderValueErrorMessage = "Either header value provided is not in correct format or the converter specified is not allowed. The dynamic header value may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2 Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + // routerServiceAccount is used to validate RBAC permissions for externalCertificate + routerServiceAccount = "system:serviceaccount:openshift-ingress:router" +) + +var ( + // validateRouteName is a ValidateNameFunc for validating a route name. + validateRouteName = apimachineryvalidation.NameIsDNSSubdomain + // permittedHeaderNameRE is a compiled regexp for validating an HTTP + // header name. + permittedHeaderNameRE = regexp.MustCompile("^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$") + // permittedRequestHeaderValueRE is a compiled regexp for validating an + // HTTP request header value. + permittedRequestHeaderValueRE = regexp.MustCompile(strings.Replace(permittedHeaderValueTemplate, "XYZ", "req", 1)) + // permittedResponseHeaderValueRE is a compiled regexp for validating an + // HTTP response header value. + permittedResponseHeaderValueRE = regexp.MustCompile(strings.Replace(permittedHeaderValueTemplate, "XYZ", "res", 1)) +) + +func ValidateRoute(ctx context.Context, route *routev1.Route, sarCreator routecommon.SubjectAccessReviewCreator, secretsGetter corev1client.SecretsGetter, opts routecommon.RouteValidationOptions) field.ErrorList { + return validateRoute(ctx, route, true, sarCreator, secretsGetter, opts) +} + +// validLabels - used in the ValidateRouteUpdate function to check if "older" routes conform to DNS1123Labels or not +func validLabels(host string) bool { + if len(host) == 0 { + return true + } + return checkLabelSegments(host) +} + +// checkLabelSegments - function that checks if hostname labels conform to DNS1123Labels +func checkLabelSegments(host string) bool { + segments := strings.Split(host, ".") + for _, s := range segments { + errs := kvalidation.IsDNS1123Label(s) + if len(errs) > 0 { + return false + } + } + return true +} + +// validateRoute - private function to validate route +func validateRoute(ctx context.Context, route *routev1.Route, checkHostname bool, sarc routecommon.SubjectAccessReviewCreator, secrets corev1client.SecretsGetter, opts routecommon.RouteValidationOptions) field.ErrorList { + //ensure meta is set properly + result := validateObjectMeta(&route.ObjectMeta, true, validateRouteName, field.NewPath("metadata")) + + specPath := field.NewPath("spec") + + //host is not required but if it is set ensure it meets DNS requirements + if len(route.Spec.Host) > 0 { + if len(kvalidation.IsDNS1123Subdomain(route.Spec.Host)) != 0 { + result = append(result, field.Invalid(specPath.Child("host"), route.Spec.Host, "host must conform to DNS 952 subdomain conventions")) + } + + // Check the hostname only if the old route did not have an invalid DNS1123Label + // and the new route cares about DNS compliant labels. 
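//
// The two checks differ: IsDNS1123Subdomain bounds the total host length (253)
// and the character set, while IsDNS1123Label additionally caps each
// dot-separated segment at 63 characters. An illustrative host whose first
// segment is 64 characters long passes the subdomain check above but fails
// here, unless the route carries the AllowNonDNSCompliantHostAnnotation
// annotation set to "true".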
+ if checkHostname && route.Annotations[routev1.AllowNonDNSCompliantHostAnnotation] != "true" { + segments := strings.Split(route.Spec.Host, ".") + for _, s := range segments { + errs := kvalidation.IsDNS1123Label(s) + for _, e := range errs { + result = append(result, field.Invalid(specPath.Child("host"), route.Spec.Host, e)) + } + } + } + } + + if len(route.Spec.Subdomain) > 0 { + // Subdomain is not lenient because it was never used outside of + // routes. + // + // TODO: Use ValidateSubdomain from library-go. + if len(route.Spec.Subdomain) > kvalidation.DNS1123SubdomainMaxLength { + result = append(result, field.Invalid(field.NewPath("spec.subdomain"), route.Spec.Subdomain, kvalidation.MaxLenError(kvalidation.DNS1123SubdomainMaxLength))) + } + for _, label := range strings.Split(route.Spec.Subdomain, ".") { + if errs := kvalidation.IsDNS1123Label(label); len(errs) > 0 { + result = append(result, field.Invalid(field.NewPath("spec.subdomain"), label, strings.Join(errs, ", "))) + } + } + } + + if err := validateWildcardPolicy(route.Spec.Host, route.Spec.WildcardPolicy, specPath.Child("wildcardPolicy")); err != nil { + result = append(result, err) + } + + if route.Spec.HTTPHeaders != nil { + if len(route.Spec.HTTPHeaders.Actions.Response) != 0 || len(route.Spec.HTTPHeaders.Actions.Request) != 0 { + if route.Spec.TLS != nil && route.Spec.TLS.Termination == routev1.TLSTerminationPassthrough { + result = append(result, field.Invalid(field.NewPath("spec", "tls", "termination"), route.Spec.TLS.Termination, "only edge and re-encrypt routes are supported for providing customized headers.")) + } + } + actionsPath := field.NewPath("spec", "httpHeaders", "actions") + if len(route.Spec.HTTPHeaders.Actions.Response) > maxResponseHeaderList { + result = append(result, field.Invalid(actionsPath.Child("response"), route.Spec.HTTPHeaders.Actions.Response, fmt.Sprintf("response headers list can't exceed %d items", maxResponseHeaderList))) + } else { + result = append(result, validateHeaders(actionsPath.Child("response"), route.Spec.HTTPHeaders.Actions.Response, permittedResponseHeaderValueRE, permittedResponseHeaderValueErrorMessage)...) + } + + if len(route.Spec.HTTPHeaders.Actions.Request) > maxRequestHeaderList { + result = append(result, field.Invalid(actionsPath.Child("request"), route.Spec.HTTPHeaders.Actions.Request, fmt.Sprintf("request headers list can't exceed %d items", maxRequestHeaderList))) + } else { + result = append(result, validateHeaders(actionsPath.Child("request"), route.Spec.HTTPHeaders.Actions.Request, permittedRequestHeaderValueRE, permittedRequestHeaderValueErrorMessage)...) 
+ } + } + + if len(route.Spec.Path) > 0 && !strings.HasPrefix(route.Spec.Path, "/") { + result = append(result, field.Invalid(specPath.Child("path"), route.Spec.Path, "path must begin with /")) + } + + if len(route.Spec.Path) > 0 && route.Spec.TLS != nil && + route.Spec.TLS.Termination == routev1.TLSTerminationPassthrough { + result = append(result, field.Invalid(specPath.Child("path"), route.Spec.Path, "passthrough termination does not support paths")) + } + + if len(route.Spec.To.Name) == 0 { + result = append(result, field.Required(specPath.Child("to", "name"), "")) + } + if route.Spec.To.Kind != "Service" { + result = append(result, field.Invalid(specPath.Child("to", "kind"), route.Spec.To.Kind, "must reference a Service")) + } + if route.Spec.To.Weight != nil && (*route.Spec.To.Weight < 0 || *route.Spec.To.Weight > 256) { + result = append(result, field.Invalid(specPath.Child("to", "weight"), route.Spec.To.Weight, "weight must be an integer between 0 and 256")) + } + + backendPath := specPath.Child("alternateBackends") + if len(route.Spec.AlternateBackends) > 3 { + result = append(result, field.Required(backendPath, "cannot specify more than 3 alternate backends")) + } + for i, svc := range route.Spec.AlternateBackends { + if len(svc.Name) == 0 { + result = append(result, field.Required(backendPath.Index(i).Child("name"), "")) + } + if svc.Kind != "Service" { + result = append(result, field.Invalid(backendPath.Index(i).Child("kind"), svc.Kind, "must reference a Service")) + } + if svc.Weight != nil && (*svc.Weight < 0 || *svc.Weight > 256) { + result = append(result, field.Invalid(backendPath.Index(i).Child("weight"), svc.Weight, "weight must be an integer between 0 and 256")) + } + } + + if route.Spec.Port != nil { + switch target := route.Spec.Port.TargetPort; { + case target.Type == intstr.Int && target.IntVal == 0, + target.Type == intstr.String && len(target.StrVal) == 0: + result = append(result, field.Required(specPath.Child("port", "targetPort"), "")) + } + } + + if errs := validateTLS(ctx, route, specPath.Child("tls"), sarc, secrets, opts); len(errs) != 0 { + result = append(result, errs...) + } + + return result +} + +func ValidateRouteUpdate(ctx context.Context, route *routev1.Route, older *routev1.Route, sarc routecommon.SubjectAccessReviewCreator, secrets corev1client.SecretsGetter, opts routecommon.RouteValidationOptions) field.ErrorList { + allErrs := validateObjectMetaUpdate(&route.ObjectMeta, &older.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(route.Spec.WildcardPolicy, older.Spec.WildcardPolicy, field.NewPath("spec", "wildcardPolicy"))...) + hostnameUpdated := route.Spec.Host != older.Spec.Host + allErrs = append(allErrs, validateRoute(ctx, route, hostnameUpdated && validLabels(older.Spec.Host), sarc, secrets, opts)...) + return allErrs +} + +// ValidateRouteStatusUpdate validates status updates for routes. +// +// Note that this function shouldn't call ValidateRouteUpdate, otherwise +// we risk breaking existing routes. +func ValidateRouteStatusUpdate(route *routev1.Route, older *routev1.Route) field.ErrorList { + allErrs := validateObjectMetaUpdate(&route.ObjectMeta, &older.ObjectMeta, field.NewPath("metadata")) + + // TODO: validate route status + return allErrs +} + +// validateTLS tests that the required fields for each type of TLS termination are set. Called +// by ValidateRoute.
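//
// A hypothetical input/outcome pair (via the exported ValidateRoute; the nil
// SAR creator and secrets getter are tolerable only because no
// externalCertificate is referenced):
//
//	route.Spec.TLS = &routev1.TLSConfig{
//		Termination: routev1.TLSTerminationPassthrough,
//		Certificate: "-----BEGIN CERTIFICATE-----...",
//	}
//	// ValidateRoute(ctx, route, nil, nil, opts) then reports field.Invalid
//	// on spec.tls.certificate: passthrough termination does not support
//	// certificates.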
+func validateTLS(ctx context.Context, route *routev1.Route, fldPath *field.Path, sarc routecommon.SubjectAccessReviewCreator, secrets corev1client.SecretsGetter, opts routecommon.RouteValidationOptions) field.ErrorList { + result := field.ErrorList{} + tls := route.Spec.TLS + + // no tls config present, no need for validation + if tls == nil { + return nil + } + + // in all cases certificate and externalCertificate must not be specified at the same time + switch tls.Termination { + // reencrypt may specify destination ca cert + // externalCert, cert, key, cacert may not be specified because the route may be a wildcard + case routev1.TLSTerminationReencrypt: + if opts.AllowExternalCertificates && tls.ExternalCertificate != nil { + if len(tls.Certificate) > 0 && len(tls.ExternalCertificate.Name) > 0 { + result = append(result, field.Invalid(fldPath.Child("externalCertificate"), tls.ExternalCertificate.Name, "cannot specify both tls.certificate and tls.externalCertificate")) + } else if len(tls.ExternalCertificate.Name) > 0 { + errs := validateTLSExternalCertificate(ctx, route, fldPath.Child("externalCertificate"), sarc, secrets) + result = append(result, errs...) + } + } + //passthrough term should not specify any cert + case routev1.TLSTerminationPassthrough: + if len(tls.Certificate) > 0 { + result = append(result, field.Invalid(fldPath.Child("certificate"), "redacted certificate data", "passthrough termination does not support certificates")) + } + + if len(tls.Key) > 0 { + result = append(result, field.Invalid(fldPath.Child("key"), "redacted key data", "passthrough termination does not support certificates")) + } + + if opts.AllowExternalCertificates && tls.ExternalCertificate != nil { + if len(tls.ExternalCertificate.Name) > 0 { + result = append(result, field.Invalid(fldPath.Child("externalCertificate"), tls.ExternalCertificate.Name, "passthrough termination does not support certificates")) + } + } + + if len(tls.CACertificate) > 0 { + result = append(result, field.Invalid(fldPath.Child("caCertificate"), "redacted ca certificate data", "passthrough termination does not support certificates")) + } + + if len(tls.DestinationCACertificate) > 0 { + result = append(result, field.Invalid(fldPath.Child("destinationCACertificate"), "redacted destination ca certificate data", "passthrough termination does not support certificates")) + } + // edge cert should only specify cert, key, and cacert but those certs + // may not be specified if the route is a wildcard route + case routev1.TLSTerminationEdge: + if len(tls.DestinationCACertificate) > 0 { + result = append(result, field.Invalid(fldPath.Child("destinationCACertificate"), "redacted destination ca certificate data", "edge termination does not support destination certificates")) + } + + if opts.AllowExternalCertificates && tls.ExternalCertificate != nil { + if len(tls.Certificate) > 0 && len(tls.ExternalCertificate.Name) > 0 { + result = append(result, field.Invalid(fldPath.Child("externalCertificate"), tls.ExternalCertificate.Name, "cannot specify both tls.certificate and tls.externalCertificate")) + } else if len(tls.ExternalCertificate.Name) > 0 { + errs := validateTLSExternalCertificate(ctx, route, fldPath.Child("externalCertificate"), sarc, secrets) + result = append(result, errs...) 
+ } + } + + default: + validValues := []string{string(routev1.TLSTerminationEdge), string(routev1.TLSTerminationPassthrough), string(routev1.TLSTerminationReencrypt)} + result = append(result, field.NotSupported(fldPath.Child("termination"), tls.Termination, validValues)) + } + + if err := validateInsecureEdgeTerminationPolicy(tls, fldPath.Child("insecureEdgeTerminationPolicy")); err != nil { + result = append(result, err) + } + + return result +} + +// validateTLSExternalCertificate tests different pre-conditions required for +// using externalCertificate. Called by validateTLS. +func validateTLSExternalCertificate(ctx context.Context, route *routev1.Route, fldPath *field.Path, sarc routecommon.SubjectAccessReviewCreator, secretsGetter corev1client.SecretsGetter) field.ErrorList { + tls := route.Spec.TLS + var errs field.ErrorList + + // The router serviceaccount must have permission to get/list/watch the referenced secret. + // The role and rolebinding to provide this access must be provided by the user. + if err := authorizationutil.Authorize(sarc, &user.DefaultInfo{Name: routerServiceAccount}, + &authorizationv1.ResourceAttributes{ + Namespace: route.Namespace, + Verb: "get", + Resource: "secrets", + Name: tls.ExternalCertificate.Name, + }); err != nil { + errs = append(errs, field.Forbidden(fldPath, "router serviceaccount does not have permission to get this secret")) + } + + if err := authorizationutil.Authorize(sarc, &user.DefaultInfo{Name: routerServiceAccount}, + &authorizationv1.ResourceAttributes{ + Namespace: route.Namespace, + Verb: "watch", + Resource: "secrets", + Name: tls.ExternalCertificate.Name, + }); err != nil { + errs = append(errs, field.Forbidden(fldPath, "router serviceaccount does not have permission to watch this secret")) + } + + if err := authorizationutil.Authorize(sarc, &user.DefaultInfo{Name: routerServiceAccount}, + &authorizationv1.ResourceAttributes{ + Namespace: route.Namespace, + Verb: "list", + Resource: "secrets", + Name: tls.ExternalCertificate.Name, + }); err != nil { + errs = append(errs, field.Forbidden(fldPath, "router serviceaccount does not have permission to list this secret")) + } + + // The secret should be in the same namespace as that of the route. + secret, err := secretsGetter.Secrets(route.Namespace).Get(ctx, tls.ExternalCertificate.Name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return append(errs, field.NotFound(fldPath, err.Error())) + } + return append(errs, field.InternalError(fldPath, err)) + } + + // The secret should be of type kubernetes.io/tls + if secret.Type != corev1.SecretTypeTLS { + errs = append(errs, field.Invalid(fldPath, tls.ExternalCertificate.Name, fmt.Sprintf("secret of type %q required", corev1.SecretTypeTLS))) + } + + return errs +} + +// validateInsecureEdgeTerminationPolicy tests fields for different types of +// insecure options. Called by validateTLS. +func validateInsecureEdgeTerminationPolicy(tls *routev1.TLSConfig, fldPath *field.Path) *field.Error { + // Check insecure option value if specified (empty is ok). + if len(tls.InsecureEdgeTerminationPolicy) == 0 { + return nil + } + + // It is an edge-terminated or reencrypt route, check insecure option value is + // one of None(for disable), Allow or Redirect. 
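+	// Passthrough routes terminate TLS at the backend, so Allow (serving
+	// plaintext alongside TLS) is not meaningful there; the passthrough case
+	// below accepts only None and Redirect.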
+ allowedValues := map[routev1.InsecureEdgeTerminationPolicyType]struct{}{ + routev1.InsecureEdgeTerminationPolicyNone: {}, + routev1.InsecureEdgeTerminationPolicyAllow: {}, + routev1.InsecureEdgeTerminationPolicyRedirect: {}, + } + + switch tls.Termination { + case routev1.TLSTerminationReencrypt: + fallthrough + case routev1.TLSTerminationEdge: + if _, ok := allowedValues[tls.InsecureEdgeTerminationPolicy]; !ok { + msg := fmt.Sprintf("invalid value for InsecureEdgeTerminationPolicy option, acceptable values are %s, %s, %s, or empty", routev1.InsecureEdgeTerminationPolicyNone, routev1.InsecureEdgeTerminationPolicyAllow, routev1.InsecureEdgeTerminationPolicyRedirect) + return field.Invalid(fldPath, tls.InsecureEdgeTerminationPolicy, msg) + } + case routev1.TLSTerminationPassthrough: + if routev1.InsecureEdgeTerminationPolicyNone != tls.InsecureEdgeTerminationPolicy && routev1.InsecureEdgeTerminationPolicyRedirect != tls.InsecureEdgeTerminationPolicy { + msg := fmt.Sprintf("invalid value for InsecureEdgeTerminationPolicy option, acceptable values are %s, %s, or empty", routev1.InsecureEdgeTerminationPolicyNone, routev1.InsecureEdgeTerminationPolicyRedirect) + return field.Invalid(fldPath, tls.InsecureEdgeTerminationPolicy, msg) + } + } + + return nil +} + +var ( + allowedWildcardPolicies = []routev1.WildcardPolicyType{routev1.WildcardPolicyNone, routev1.WildcardPolicySubdomain} +) + +// validateWildcardPolicy tests that the wildcard policy is either empty or one of the supported types. +func validateWildcardPolicy(host string, policy routev1.WildcardPolicyType, fldPath *field.Path) *field.Error { + if len(policy) == 0 { + return nil + } + + // Check if policy is one of None or Subdomain. + if !slices.Contains(allowedWildcardPolicies, policy) { + return field.NotSupported(fldPath, policy, allowedWildcardPolicies) + } + + if policy == routev1.WildcardPolicySubdomain && len(host) == 0 { + return field.Invalid(fldPath, policy, "host name not specified for wildcard policy") + } + + return nil +} + +var ( + notAllowedHTTPHeaders = []string{"strict-transport-security", "proxy", "cookie", "set-cookie"} + notAllowedHTTPHeadersMessage = fmt.Sprintf("the following headers may not be modified using this API: %v", strings.Join(notAllowedHTTPHeaders, ", ")) +) + +// validateHeaders verifies that the given slice of request or response headers +// is valid using the given regexp. +func validateHeaders(fldPath *field.Path, headers []routev1.RouteHTTPHeader, valueRegexpForHeaderValue *regexp.Regexp, valueErrorMessage string) field.ErrorList { + allErrs := field.ErrorList{} + headersMap := map[string]struct{}{} + for i, header := range headers { + idxPath := fldPath.Index(i) + + // Each action must specify a unique header. 
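+		// The duplicate check below matches names exactly; names differing
+		// only in case are treated as distinct headers here.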
+ _, alreadyExists := headersMap[header.Name] + if alreadyExists { + err := field.Duplicate(idxPath.Child("name"), header.Name) + allErrs = append(allErrs, err) + } + headersMap[header.Name] = struct{}{} + + switch nameLength := len(header.Name); { + case nameLength == 0: + err := field.Required(idxPath.Child("name"), "") + allErrs = append(allErrs, err) + case nameLength > maxHeaderNameSize: + err := field.Invalid(idxPath.Child("name"), header.Name, fmt.Sprintf("name exceeds the maximum length, which is %d", maxHeaderNameSize)) + allErrs = append(allErrs, err) + case slices.Contains(notAllowedHTTPHeaders, strings.ToLower(header.Name)): + err := field.Forbidden(idxPath.Child("name"), notAllowedHTTPHeadersMessage) + allErrs = append(allErrs, err) + case !permittedHeaderNameRE.MatchString(header.Name): + err := field.Invalid(idxPath.Child("name"), header.Name, permittedHeaderNameErrorMessage) + allErrs = append(allErrs, err) + } + + if header.Action.Type != routev1.Set && header.Action.Type != routev1.Delete { + err := field.Invalid(idxPath.Child("action", "type"), header.Action.Type, fmt.Sprintf("type must be %q or %q", routev1.Set, routev1.Delete)) + allErrs = append(allErrs, err) + } + + if header.Action.Type == routev1.Set && header.Action.Set == nil || header.Action.Type != routev1.Set && header.Action.Set != nil { + err := field.Required(idxPath.Child("action", "set"), "set is required when type is Set, and forbidden otherwise") + allErrs = append(allErrs, err) + } + if header.Action.Set != nil { + switch valueLength := len(header.Action.Set.Value); { + case valueLength == 0: + err := field.Required(idxPath.Child("action", "set", "value"), "") + allErrs = append(allErrs, err) + case valueLength > maxHeaderValueSize: + err := field.Invalid(idxPath.Child("action", "set", "value"), header.Action.Set.Value, fmt.Sprintf("value exceeds the maximum length, which is %d", maxHeaderValueSize)) + allErrs = append(allErrs, err) + case !valueRegexpForHeaderValue.MatchString(header.Action.Set.Value): + err := field.Invalid(idxPath.Child("action", "set", "value"), header.Action.Set.Value, valueErrorMessage) + allErrs = append(allErrs, err) + } + } + } + return allErrs +} + +// The special finalizer name validations were copied from k8s.io/kubernetes to eliminate that +// dependency and preserve the existing behavior. + +// k8s.io/kubernetes/pkg/apis/core/validation.ValidateObjectMeta +func validateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn apimachineryvalidation.ValidateNameFunc, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) + // run additional checks for the finalizer name + for i := range meta.Finalizers { + allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) + } + return allErrs +} + +// k8s.io/kubernetes/pkg/apis/core/validation.ValidateObjectMetaUpdate +func validateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) + // run additional checks for the finalizer name + for i := range newMeta.Finalizers { + allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) 
+ } + + return allErrs +} + +var standardFinalizers = sets.New( + string(corev1.FinalizerKubernetes), + metav1.FinalizerOrphanDependents, + metav1.FinalizerDeleteDependents, +) + +func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(strings.Split(stringValue, "/")) == 1 { + if !standardFinalizers.Has(stringValue) { + return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) + } + } + + return allErrs +} + +func Warnings(route *routev1.Route) []string { + if len(route.Spec.Host) != 0 && len(route.Spec.Subdomain) != 0 { + var warnings []string + warnings = append(warnings, "spec.host is set; spec.subdomain may be ignored") + return warnings + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go new file mode 100644 index 0000000000000..5e8bdd21241a9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go @@ -0,0 +1,47 @@ +package ldaputil + +import ( + "encoding/base64" + "strings" + + "github.com/go-ldap/ldap/v3" +) + +// GetAttributeValue finds the first attribute of those given that the LDAP entry has, and +// returns it. GetAttributeValue is able to query the DN as well as Attributes of the LDAP entry. +// If no value is found, the empty string is returned. +func GetAttributeValue(entry *ldap.Entry, attributes []string) string { + for _, k := range attributes { + // Ignore empty attributes + if len(k) == 0 { + continue + } + // Special-case DN, since it's not an attribute + if strings.ToLower(k) == "dn" { + return entry.DN + } + // Otherwise get an attribute and return it if present + if v := entry.GetAttributeValue(k); len(v) > 0 { + return v + } + } + return "" +} + +func GetRawAttributeValue(entry *ldap.Entry, attributes []string) string { + for _, k := range attributes { + // Ignore empty attributes + if len(k) == 0 { + continue + } + // Special-case DN, since it's not an attribute + if strings.ToLower(k) == "dn" { + return base64.RawURLEncoding.EncodeToString([]byte(entry.DN)) + } + // Otherwise get an attribute and return it if present + if v := entry.GetRawAttributeValue(k); len(v) > 0 { + return base64.RawURLEncoding.EncodeToString(v) + } + } + return "" +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go new file mode 100644 index 0000000000000..8fa4c53c8f47a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go @@ -0,0 +1,247 @@ +package ldaputil + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/go-ldap/ldap/v3" +) + +// Scheme is a valid ldap scheme +type Scheme string + +const ( + SchemeLDAP Scheme = "ldap" + SchemeLDAPS Scheme = "ldaps" +) + +// Scope is a valid LDAP search scope +type Scope int + +const ( + ScopeWholeSubtree Scope = ldap.ScopeWholeSubtree + ScopeSingleLevel Scope = ldap.ScopeSingleLevel + ScopeBaseObject Scope = ldap.ScopeBaseObject +) + +// DerefAliases is a valid LDAP alias dereference parameter +type DerefAliases int + +const ( + DerefAliasesNever = ldap.NeverDerefAliases + DerefAliasesSearching = ldap.DerefInSearching + DerefAliasesFinding = ldap.DerefFindingBaseObj + DerefAliasesAlways = ldap.DerefAlways +) + +const ( + defaultLDAPPort = "389" + defaultLDAPSPort = "636" + + defaultHost = 
"localhost" + defaultQueryAttribute = "uid" + defaultFilter = "(objectClass=*)" + + scopeWholeSubtreeString = "sub" + scopeSingleLevelString = "one" + scopeBaseObjectString = "base" + + criticalExtensionPrefix = "!" +) + +// LDAPURL holds a parsed RFC 2255 URL +type LDAPURL struct { + // Scheme is ldap or ldaps + Scheme Scheme + // Host is the host:port of the LDAP server + Host string + // The DN of the branch of the directory where all searches should start from + BaseDN string + // The attribute to search for + QueryAttribute string + // The scope of the search. Can be ldap.ScopeWholeSubtree, ldap.ScopeSingleLevel, or ldap.ScopeBaseObject + Scope Scope + // A valid LDAP search filter (e.g. "(objectClass=*)") + Filter string +} + +// ParseURL parsed the given ldapURL as an RFC 2255 URL +// The syntax of the URL is ldap://host:port/basedn?attribute?scope?filter +func ParseURL(ldapURL string) (LDAPURL, error) { + // Must be a valid URL to start + parsedURL, err := url.Parse(ldapURL) + if err != nil { + return LDAPURL{}, err + } + + opts := LDAPURL{} + + determinedScheme, err := DetermineLDAPScheme(parsedURL.Scheme) + if err != nil { + return LDAPURL{}, err + } + opts.Scheme = determinedScheme + + determinedHost, err := DetermineLDAPHost(parsedURL.Host, opts.Scheme) + if err != nil { + return LDAPURL{}, err + } + opts.Host = determinedHost + + // Set base dn (default to "") + // url.Parse() already percent-decodes the path + opts.BaseDN = strings.TrimLeft(parsedURL.Path, "/") + + attributes, scope, filter, extensions, err := SplitLDAPQuery(parsedURL.RawQuery) + if err != nil { + return LDAPURL{}, err + } + + // Attributes contains comma-separated attributes + // Set query attribute to first attribute + // Default to uid to match mod_auth_ldap + opts.QueryAttribute = strings.Split(attributes, ",")[0] + if len(opts.QueryAttribute) == 0 { + opts.QueryAttribute = defaultQueryAttribute + } + + determinedScope, err := DetermineLDAPScope(scope) + if err != nil { + return LDAPURL{}, err + } + opts.Scope = determinedScope + + determinedFilter, err := DetermineLDAPFilter(filter) + if err != nil { + return LDAPURL{}, err + } + opts.Filter = determinedFilter + + // Extensions are in "name=value,name2=value2" form + // Critical extensions are prefixed with a ! + // Optional extensions are ignored, per RFC + // Fail if there are any critical extensions, since we don't support any + if len(extensions) > 0 { + for _, extension := range strings.Split(extensions, ",") { + exttype := strings.SplitN(extension, "=", 2)[0] + if strings.HasPrefix(exttype, criticalExtensionPrefix) { + return LDAPURL{}, fmt.Errorf("unsupported critical extension %s", extension) + } + } + } + + return opts, nil + +} + +// DetermineLDAPScheme determines the LDAP connection scheme. Scheme is one of "ldap" or "ldaps" +// Default to "ldap" +func DetermineLDAPScheme(scheme string) (Scheme, error) { + switch Scheme(scheme) { + case SchemeLDAP, SchemeLDAPS: + return Scheme(scheme), nil + default: + return "", fmt.Errorf("invalid scheme %q", scheme) + } +} + +// DetermineLDAPHost determines the host and port for the LDAP connection. 
+// The default host is localhost; the default port for scheme "ldap" is 389, for "ldaps" is 636
+func DetermineLDAPHost(hostport string, scheme Scheme) (string, error) {
+	if len(hostport) == 0 {
+		hostport = defaultHost
+	}
+	// add port if missing
+	if _, _, err := net.SplitHostPort(hostport); err != nil {
+		switch scheme {
+		case SchemeLDAPS:
+			return net.JoinHostPort(hostport, defaultLDAPSPort), nil
+		case SchemeLDAP:
+			return net.JoinHostPort(hostport, defaultLDAPPort), nil
+		default:
+			return "", fmt.Errorf("no default port for scheme %q", scheme)
+		}
+	}
+	// nothing needed to be done
+	return hostport, nil
+}
+
+// SplitLDAPQuery splits the query in the URL into its constituent parts. All sections are optional.
+// Query syntax is attribute?scope?filter?extensions
+func SplitLDAPQuery(query string) (attributes, scope, filter, extensions string, err error) {
+	parts := strings.Split(query, "?")
+	switch len(parts) {
+	case 4:
+		extensions = parts[3]
+		fallthrough
+	case 3:
+		if v, err := url.QueryUnescape(parts[2]); err != nil {
+			return "", "", "", "", err
+		} else {
+			filter = v
+		}
+		fallthrough
+	case 2:
+		if v, err := url.QueryUnescape(parts[1]); err != nil {
+			return "", "", "", "", err
+		} else {
+			scope = v
+		}
+		fallthrough
+	case 1:
+		if v, err := url.QueryUnescape(parts[0]); err != nil {
+			return "", "", "", "", err
+		} else {
+			attributes = v
+		}
+		return attributes, scope, filter, extensions, nil
+	case 0:
+		return
+	default:
+		err = fmt.Errorf("too many query options %q", query)
+		return "", "", "", "", err
+	}
+}
+
+// DetermineLDAPScope determines the LDAP search scope. Scope is one of "sub", "one", or "base".
+// Default to "sub" to match mod_auth_ldap
+func DetermineLDAPScope(scope string) (Scope, error) {
+	switch scope {
+	case "", scopeWholeSubtreeString:
+		return ScopeWholeSubtree, nil
+	case scopeSingleLevelString:
+		return ScopeSingleLevel, nil
+	case scopeBaseObjectString:
+		return ScopeBaseObject, nil
+	default:
+		return -1, fmt.Errorf("invalid scope %q", scope)
+	}
+}
+
+// DetermineLDAPFilter determines the LDAP search filter. Filter is a valid LDAP filter.
+// Default to "(objectClass=*)" per RFC
+func DetermineLDAPFilter(filter string) (string, error) {
+	if len(filter) == 0 {
+		return defaultFilter, nil
+	}
+	if _, err := ldap.CompileFilter(filter); err != nil {
+		return "", fmt.Errorf("invalid filter: %v", err)
+	}
+	return filter, nil
+}
+
+// DetermineDerefAliasesBehavior maps the given string to one of the LDAP
+// alias dereferencing behaviors.
+func DetermineDerefAliasesBehavior(derefAliasesString string) (DerefAliases, error) {
+	mapping := map[string]DerefAliases{
+		"never":  DerefAliasesNever,
+		"search": DerefAliasesSearching,
+		"base":   DerefAliasesFinding,
+		"always": DerefAliasesAlways,
+	}
+	derefAliases, exists := mapping[derefAliasesString]
+	if !exists {
+		return -1, fmt.Errorf("not a valid LDAP alias dereferencing behavior: %s", derefAliasesString)
+	}
+	return derefAliases, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go b/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go
new file mode 100644
index 0000000000000..836a71a5a41a8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go
@@ -0,0 +1,125 @@
+package uid
+
+import (
+	"fmt"
+	"strings"
+)
+
+type Block struct {
+	Start uint32
+	End   uint32
+}
+
+var (
+	ErrBlockSlashBadFormat = fmt.Errorf("block not in the format \"<start>/<size>\"")
+	ErrBlockDashBadFormat  = fmt.Errorf("block not in the format \"<start>-<end>\"")
+)
+
+func ParseBlock(in string) (Block, error) {
+	if strings.Contains(in, "/") {
+		var start, size uint32
+		n, err := fmt.Sscanf(in, "%d/%d", &start, &size)
+		if err != nil {
+			return Block{}, err
+		}
+		if n != 2 {
+			return Block{}, ErrBlockSlashBadFormat
+		}
+		return Block{Start: start, End: start + size - 1}, nil
+	}
+
+	var start, end uint32
+	n, err := fmt.Sscanf(in, "%d-%d", &start, &end)
+	if err != nil {
+		return Block{}, err
+	}
+	if n != 2 {
+		return Block{}, ErrBlockDashBadFormat
+	}
+	return Block{Start: start, End: end}, nil
+}
+
+func (b Block) String() string {
+	return fmt.Sprintf("%d/%d", b.Start, b.Size())
+}
+
+func (b Block) RangeString() string {
+	return fmt.Sprintf("%d-%d", b.Start, b.End)
+}
+
+func (b Block) Size() uint32 {
+	return b.End - b.Start + 1
+}
+
+type Range struct {
+	block Block
+	size  uint32
+}
+
+func NewRange(start, end, size uint32) (*Range, error) {
+	if start > end {
+		return nil, fmt.Errorf("start %d must be less than end %d", start, end)
+	}
+	if size == 0 {
+		return nil, fmt.Errorf("block size must be a positive integer")
+	}
+	if (end - start) < size {
+		return nil, fmt.Errorf("block size must be less than or equal to the range")
+	}
+	return &Range{
+		block: Block{start, end},
+		size:  size,
+	}, nil
+}
+
+func ParseRange(in string) (*Range, error) {
+	var start, end, block uint32
+	n, err := fmt.Sscanf(in, "%d-%d/%d", &start, &end, &block)
+	if err != nil {
+		return nil, err
+	}
+	if n != 3 {
+		return nil, fmt.Errorf("range not in the format \"<start>-<end>/<size>\"")
+	}
+	return NewRange(start, end, block)
+}
+
+func (r *Range) Size() uint32 {
+	return r.block.Size() / r.size
+}
+
+func (r *Range) String() string {
+	return fmt.Sprintf("%s/%d", r.block.RangeString(), r.size)
+}
+
+func (r *Range) BlockAt(offset uint32) (Block, bool) {
+	if offset > r.Size() {
+		return Block{}, false
+	}
+	start := r.block.Start + offset*r.size
+	return Block{
+		Start: start,
+		End:   start + r.size - 1,
+	}, true
+}
+
+func (r *Range) Contains(block Block) bool {
+	ok, _ := r.Offset(block)
+	return ok
+}
+
+func (r *Range) Offset(block Block) (bool, uint32) {
+	if block.Start < r.block.Start {
+		return false, 0
+	}
+	if block.End > r.block.End {
+		return false, 0
+	}
+	if block.End-block.Start+1 != r.size {
+		return false, 0
+	}
+	if (block.Start-r.block.Start)%r.size != 0 {
+		return false, 0
+	}
+	return true, (block.Start - r.block.Start) / r.size
+}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000000000..571116cc39c64
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+  range: 80..100
+  round: down
+  precision: 2
+
+  status:
+    project:                   # measuring the overall project coverage
+      default:                 # context, you can create multiple ones with custom titles
+        enabled: yes           # must be yes|true to enable this status
+        target: 100            # specify the target coverage for each commit status
+                               #   option: "auto" (must increase from parent commit or pull request base)
+                               #   option: "X%" a static target percentage to hit
+        if_not_found: success  # if parent is not found report status as success, error, or failure
+        if_ci_failed: error    # if ci fails report status as success, error, or failure
+
+# Also update COVER_IGNORE_PKGS in the Makefile.
+ignore:
+  - /internal/gen-atomicint/
+  - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000000000..c3fa253893f06
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,12 @@
+/bin
+.DS_Store
+/vendor
+cover.html
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 0000000000000..13d0a4f25404d
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,27 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+env:
+  global:
+    - GO111MODULE=on
+
+matrix:
+  include:
+  - go: oldstable
+  - go: stable
+    env: LINT=1
+
+cache:
+  directories:
+    - vendor
+
+before_install:
+  - go version
+
+script:
+  - test -z "$LINT" || make lint
+  - make cover
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
new file mode 100644
index 0000000000000..24c0274dc3215
--- /dev/null
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -0,0 +1,76 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [1.7.0] - 2020-09-14
+### Added
+- Support JSON serialization and deserialization of primitive atomic types.
+- Support Text marshalling and unmarshalling for string atomics.
+
+### Changed
+- Disallow incorrect comparison of atomic values in a non-atomic way.
+
+### Removed
+- Remove dependency on `golang.org/x/{lint, tools}`.
+
+## [1.6.0] - 2020-02-24
+### Changed
+- Drop library dependency on `golang.org/x/{lint, tools}`.
+
+## [1.5.1] - 2019-11-19
+- Fix bug where `Bool.CAS` and `Bool.Toggle` did not work correctly together,
+  causing `CAS` to fail even though the old value matched.
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+  If you need to use the old import path, please add a `replace` directive to
+  your `go.mod`.
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
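A minimal sketch of how the `atomic.Error` type added in 1.4.0 is used, based on the `NewError`/`Load`/`Store` API vendored in `error.go` later in this diff; the surrounding program and the error message are illustrative only:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	// The zero value wraps a nil error and is ready to use.
	var lastErr atomic.Error

	// Store and Load are safe to call from concurrently running goroutines.
	lastErr.Store(errors.New("connection reset"))
	fmt.Println(lastErr.Load()) // connection reset
}
```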
+ +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +## [1.0.0] - 2016-07-18 + +- Initial release. + +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 0000000000000..8765c9fbc6191 --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 0000000000000..1b1376d42533e --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,78 @@ +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... 
+
+.PHONY: gofmt
+gofmt:
+	$(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+	gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+	@[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+	cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+	cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+	go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+	go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+	$(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+	$(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+	go list -find ./... | \
+	grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+	paste -sd, -)
+
+.PHONY: cover
+cover:
+	go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+	go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+	go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+	@[ -z "$$(git status --porcelain)" ] || ( \
+		echo "Working tree is dirty. Commit your changes first."; \
+		exit 1 )
+	@make generate
+	@status=$$(git status --porcelain); \
+	[ -z "$$status" ] || ( \
+		echo "Working tree is dirty after `make generate`:"; \
+		echo "$$status"; \
+		echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000000000..ade0c20f16b4a
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,63 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u go.uber.org/atomic@v1
+```
+
+### Legacy Import Path
+
+As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
+of using this package. If you are using Go modules, this package will fail to
+compile with the legacy import path `github.com/uber-go/atomic`.
+
+We recommend migrating your code to the new import path but if you're unable
+to do so, or if your dependencies are still using the old import path, you
+will have to add a `replace` directive to your `go.mod` file downgrading the
+legacy import path to an older version.
+
+```
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+```
+
+You can do so automatically by running the following command.
+
+```shell
+$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+
+Stable.
+
+---
+
+Released under the [MIT License](LICENSE.txt).
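The JSON support mentioned in the v1.7.0 changelog entry is visible in the vendored wrappers that follow (`MarshalJSON`/`UnmarshalJSON` on `Bool`, `Duration`, `Float64`, `Int32`, and `Int64`). A minimal round-trip sketch; the `config` struct and its field names are invented purely for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/atomic"
)

// config is a hypothetical struct embedding the atomic wrappers; pointer
// fields are used so their MarshalJSON/UnmarshalJSON methods apply.
type config struct {
	Enabled *atomic.Bool    `json:"enabled"`
	Ratio   *atomic.Float64 `json:"ratio"`
}

func main() {
	c := config{Enabled: atomic.NewBool(true), Ratio: atomic.NewFloat64(0.75)}

	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"enabled":true,"ratio":0.75}

	// Unmarshal stores into the existing wrappers atomically.
	decoded := config{Enabled: atomic.NewBool(false), Ratio: atomic.NewFloat64(0)}
	if err := json.Unmarshal(out, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Enabled.Load(), decoded.Ratio.Load()) // true 0.75
}
```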
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 0000000000000..9cf1914b1f826 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(v bool) *Bool { + x := &Bool{} + if v != _zeroBool { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(v bool) { + x.v.Store(boolToInt(v)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(o, n bool) bool { + return x.v.CAS(boolToInt(o), boolToInt(n)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(o bool) bool { + return truthy(x.v.Swap(boolToInt(o))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 0000000000000..c7bf7a827a81c --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 0000000000000..ae7390ee6887e --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 0000000000000..027cfcb20bf52 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(v time.Duration) *Duration { + x := &Duration{} + if v != _zeroDuration { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(v time.Duration) { + x.v.Store(int64(v)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CAS(o, n time.Duration) bool { + return x.v.CAS(int64(o), int64(n)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(o time.Duration) time.Duration { + return time.Duration(x.v.Swap(int64(o))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 0000000000000..6273b66bd6597 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 0000000000000..a6166fbea01e1 --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,51 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(v error) *Error { + x := &Error{} + if v != _zeroError { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(v error) { + x.v.Store(packError(v)) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 0000000000000..ffe0be21cb017 --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 0000000000000..0719060207da4 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,76 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(v float64) *Float64 { + x := &Float64{} + if v != _zeroFloat64 { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped float64. 
+func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(v float64) { + x.v.Store(math.Float64bits(v)) +} + +// CAS is an atomic compare-and-swap for float64 values. +func (x *Float64) CAS(o, n float64) bool { + return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 0000000000000..927b1add74e51 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "strconv" + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 0000000000000..50d6b248588fa --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,26 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 0000000000000..18ae56493ee98 --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(i int32) *Int32 { + return &Int32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. 
+func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 0000000000000..2bcbbfaa95323 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(i int64) *Int64 { + return &Int64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. 
+func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 0000000000000..a8201cb4a18ef --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 0000000000000..225b7a2be0aa1 --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,54 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(v string) *String { + x := &String{} + if v != _zeroString { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) + } + return _zeroString +} + +// Store atomically stores the passed string. +func (x *String) Store(v string) { + x.v.Store(v) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 0000000000000..3a9558213d0dc --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. 
+func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 0000000000000..a973aba1a60b4 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 0000000000000..3b6c71fd5a372 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 0000000000000..671f3a382475b --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go new file mode 100644 index 0000000000000..7d9281e02594f --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -0,0 +1,122 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package md4 implements the MD4 hash algorithm as defined in RFC 1320. +// +// Deprecated: MD4 is cryptographically broken and should only be used +// where compatibility with legacy systems, not security, is the goal. Instead, +// use a secure hash like SHA-256 (from crypto/sha256). +package md4 + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.MD4, New) +} + +// The size of an MD4 checksum in bytes. +const Size = 16 + +// The blocksize of MD4 in bytes. +const BlockSize = 64 + +const ( + _Chunk = 64 + _Init0 = 0x67452301 + _Init1 = 0xEFCDAB89 + _Init2 = 0x98BADCFE + _Init3 = 0x10325476 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [4]uint32 + x [_Chunk]byte + nx int + len uint64 +} + +func (d *digest) Reset() { + d.s[0] = _Init0 + d.s[1] = _Init1 + d.s[2] = _Init2 + d.s[3] = _Init3 + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the MD4 checksum. 
+func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > _Chunk-d.nx { + n = _Chunk - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == _Chunk { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0, so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + len := d.len + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + for _, s := range d.s { + in = append(in, byte(s>>0)) + in = append(in, byte(s>>8)) + in = append(in, byte(s>>16)) + in = append(in, byte(s>>24)) + } + return in +} diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go new file mode 100644 index 0000000000000..5ea1ba966ea4d --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4block.go @@ -0,0 +1,91 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// MD4 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package md4 + +import "math/bits" + +var shift1 = []int{3, 7, 11, 19} +var shift2 = []int{3, 5, 9, 13} +var shift3 = []int{3, 9, 11, 15} + +var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15} +var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15} + +func _Block(dig *digest, p []byte) int { + a := dig.s[0] + b := dig.s[1] + c := dig.s[2] + d := dig.s[3] + n := 0 + var X [16]uint32 + for len(p) >= _Chunk { + aa, bb, cc, dd := a, b, c, d + + j := 0 + for i := 0; i < 16; i++ { + X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // If this needs to be made faster in the future, + // the usual trick is to unroll each of these + // loops by a factor of 4; that lets you replace + // the shift[] lookups with constants and, + // with suitable variable renaming in each + // unrolled body, delete the a, b, c, d = d, a, b, c + // (or you can let the optimizer do the renaming). + // + // The index variables are uint so that % by a power + // of two can be optimized easily by a compiler. + + // Round 1. + for i := uint(0); i < 16; i++ { + x := i + s := shift1[i%4] + f := ((c ^ d) & b) ^ d + a += f + X[x] + a = bits.RotateLeft32(a, s) + a, b, c, d = d, a, b, c + } + + // Round 2. + for i := uint(0); i < 16; i++ { + x := xIndex2[i] + s := shift2[i%4] + g := (b & c) | (b & d) | (c & d) + a += g + X[x] + 0x5a827999 + a = bits.RotateLeft32(a, s) + a, b, c, d = d, a, b, c + } + + // Round 3. 
+ for i := uint(0); i < 16; i++ { + x := xIndex3[i] + s := shift3[i%4] + h := b ^ c ^ d + a += h + X[x] + 0x6ed9eba1 + a = bits.RotateLeft32(a, s) + a, b, c, d = d, a, b, c + } + + a += aa + b += bb + c += cc + d += dd + + p = p[_Chunk:] + n += _Chunk + } + + dig.s[0] = a + dig.s[1] = b + dig.s[2] = c + dig.s[3] = d + return n +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index fe19e8f97a711..aa69fb4d509ff 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. 
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 
0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 
0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 
0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 
0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 
0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 
0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: 
google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: 
google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1816,7 +1892,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae34..5d4096d46a048 100644 --- 
a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. -- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c48..abab279379ba8 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce94..d7b40b7cb66f1 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
// // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f7299b..b181f386a1ba6 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6ae..2b87bd79c7573 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. 
} -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603f1d4e..4d69b4052f8e3 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -50,7 +50,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 4161fdf47a8b1..8ad6ce2f0950a 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. 
But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the TrySchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -252,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -318,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference.
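// The ScheduleOr comment in the hunk above captures a subtle contract: a
// state update must run on a serializer that may already be closed, yet the
// caller still blocks on a result channel that is closed on either path.
// The sketch below illustrates that pattern in isolation. It is an editor's
// illustration under stated assumptions, not code from the vendored patch:
// miniSerializer is a hypothetical stand-in for the internal
// grpcsync.CallbackSerializer, and the errCh handshake mirrors
// updateClientConnState above.
package main

import (
	"context"
	"fmt"
)

// miniSerializer runs callbacks one at a time on a single goroutine and
// rejects new work once closed.
type miniSerializer struct {
	ctx   context.Context
	stop  context.CancelFunc
	tasks chan func(context.Context)
	done  chan struct{}
}

func newMiniSerializer() *miniSerializer {
	ctx, cancel := context.WithCancel(context.Background())
	s := &miniSerializer{ctx: ctx, stop: cancel, tasks: make(chan func(context.Context), 16), done: make(chan struct{})}
	go func() {
		defer close(s.done)
		for {
			select {
			case f := <-s.tasks:
				f(s.ctx)
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// ScheduleOr enqueues f, or runs onFailure when the serializer has been
// closed. The caller can therefore block on a channel that is closed on
// either path, which is how updateClientConnState waits for its result
// without deadlocking against close().
func (s *miniSerializer) ScheduleOr(f func(context.Context), onFailure func()) {
	select {
	case <-s.ctx.Done():
		onFailure()
		return
	default:
	}
	select {
	case s.tasks <- f:
	case <-s.ctx.Done():
		onFailure()
	}
}

func (s *miniSerializer) Close() {
	s.stop()
	<-s.done
}

func main() {
	s := newMiniSerializer()

	errCh := make(chan error, 1)
	s.ScheduleOr(func(context.Context) {
		errCh <- nil // stand-in for balancer.UpdateClientConnState returning nil
	}, func() { close(errCh) })
	fmt.Println("update err:", <-errCh)

	s.Close()

	errCh = make(chan error, 1)
	s.ScheduleOr(func(context.Context) { errCh <- nil }, func() { close(errCh) })
	// The closed channel yields nil here: the lost race is simply ignored,
	// matching "It is acceptable to ignore this failure" above.
	fmt.Println("after close err:", <-errCh)
}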
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe933..55bffaa77ef0f 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ 
func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b00c..9c8850e3fdd5b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
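// For example, a caller can block until the channel reaches READY with a
// polling loop like the following (a hedged sketch, not from the vendored
// patch; cc and ctx are assumed to be the caller's *ClientConn and
// context.Context):
//
//	for state := cc.GetState(); state != connectivity.Ready; state = cc.GetState() {
//		if !cc.WaitForStateChange(ctx, state) {
//			return ctx.Err() // ctx expired before the channel became READY
//		}
//	}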
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { return true } select { case <-ctx.Done(): return false case <-ch: return true } } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -918,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true if a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1190,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. 
+ // closing of transports is also taken care of by cancellation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd47ccd..e840858b77b18 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the registry in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). It checks the V1 registry first with encoding.GetCodec and, if a +// V1 codec is registered, wraps it with newCodecV1Bridge to turn it into an +// encoding.CodecV2. Otherwise it falls back to the V2 registry with +// encoding.GetCodecV2. Returns nil if the name is registered in neither. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bfee..4c805c64462c9 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity.
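// These credentials are typically installed through a dial option, e.g. (a
// hedged sketch by the editor, not from the vendored patch; the target
// address is illustrative):
//
//	conn, err := grpc.NewClient("localhost:50051",
//		grpc.WithTransportCredentials(insecure.NewCredentials()))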
type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a53f3..2b285beee376b 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad7465..e7b532b6f806f 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147f2..11d0ae142c429 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 0000000000000..074c5e234a7b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. 
+ Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03ec58..ceec319dd2fb4 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. 
+type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 0000000000000..1d827dd5d9d41 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. 
entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to be + // attached to metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to be attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for an int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...)
+} + +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 gauge value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e.
in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, registeredMetrics) + maps.Copy(metricsRegistry, metricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 0000000000000..3221f7a633a37 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats API's. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. 
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000000000..3221f7a633a37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from the metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+ return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ delete(newMetrics, metric)
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced2553..f1ae080dcb816 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
import (
 "fmt"
-
- "google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
 args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.InfoDepth(depth+1, args...)
+ InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
 args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.WarningDepth(depth+1, args...)
+ WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
 args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.ErrorDepth(depth+1, args...)
+ ErrorDepth(depth+1, args...)
} func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb993c..db320105e64e2 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. 
func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 0000000000000..59c03bc14c2a9 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
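A usage sketch, not part of the patch, of how these hooks get populated: the public grpclog package's SetLoggerV2 (changed later in this patch) stores the user's logger in LoggerV2Impl, and in DepthLoggerV2Impl when the logger also implements DepthLoggerV2, after which the free functions in grpclog delegate to them.

	package main

	import (
		"os"

		"google.golang.org/grpc/grpclog"
	)

	func init() {
		// Roughly equivalent to GRPC_GO_LOG_VERBOSITY_LEVEL=2 with the
		// default logger: info, warning, and error writers plus a
		// verbosity threshold.
		grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 2))
	}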
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000000000..e524fdd40b236
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+ // Returns true for all verbose levels.
+ return true
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
similarity index 52%
rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index bfc45102ab245..07df71e98a87a 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -1,6 +1,6 @@
 /*
 *
- * Copyright 2020 gRPC authors.
+ * Copyright 2024 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,59 +16,17 @@
 *
 */
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
+package internal
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
 "os"
)
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.InfoDepth(depth, args...)
- } else {
- Logger.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.WarningDepth(depth, args...)
- } else {
- Logger.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.ErrorDepth(depth, args...)
- } else {
- Logger.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.FatalDepth(depth, args...)
- } else {
- Logger.Fatalln(args...)
- }
- os.Exit(1)
-}
-
 // LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
type LoggerV2 interface {
 // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
 Info(args ...any)
@@ -107,14 +65,13 @@ type LoggerV2 interface {
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
-// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
-// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
+ LoggerV2
 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 InfoDepth(depth int, args ...any)
 // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
@@ -124,3 +81,124 @@ type DepthLoggerV2 interface {
 // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 FatalDepth(depth int, args ...any)
}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267ca4..4b203585707af 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. 
-type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d713032..892dc13d164b9 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
- // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b8835073502..d92335445f650 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06e5f..f96b8ab4927e1 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77f9c..d4b4b7081590e 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871dfb..9669328914ad4 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925d9..64c791953d017 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. 
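Returning to the generated Health service above, a minimal server sketch, not part of the patch, showing the two changes a hand-written implementation sees: UnimplementedHealthServer must be embedded by value (RegisterHealthServer probes testEmbeddedByValue, and a nil pointer embed would panic), and Watch now takes the generic grpc.ServerStreamingServer, for which Health_WatchServer remains an alias.

	package example

	import (
		"context"

		"google.golang.org/grpc"
		healthpb "google.golang.org/grpc/health/grpc_health_v1"
	)

	type healthSvc struct {
		healthpb.UnimplementedHealthServer // embedded by value, not as a pointer
	}

	func (s *healthSvc) Check(_ context.Context, _ *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
		return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
	}

	// The generic stream type replaces the removed Health_WatchServer interface.
	func (s *healthSvc) Watch(_ *healthpb.HealthCheckRequest, stream grpc.ServerStreamingServer[healthpb.HealthCheckResponse]) error {
		return stream.Send(&healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING})
	}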
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507aa6..078bb81238bc4 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a5186..0e6e18e185c7a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d906487139445..452985f8d8f1b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,7 +45,11 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1731c8..7617be2158957 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. 
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de7632b..092ad187a2c8d 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. 
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
 return &PrefixLogger{logger: logger, prefix: prefix}
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16acee5..19b9d639275a8 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 return cs
}
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
//
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
}

func (cs *CallbackSerializer) run(ctx context.Context) {
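A small sketch, not part of the patch, contrasting the two scheduling paths that replace the old boolean-returning Schedule: TrySchedule drops the callback silently once the serializer's context has been canceled, while ScheduleOr reports that case by running onFailure inline.

	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: silently dropped if the serializer is already closed.
	cs.TrySchedule(func(ctx context.Context) { /* ... work honoring ctx ... */ })

	cancel()

	// Once the serializer has observed the cancellation, the callback can no
	// longer be queued, and onFailure runs inline instead.
	cs.ScheduleOr(func(ctx context.Context) { /* not executed */ }, func() {
		// error/cleanup path
	})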
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1ab0cd..6d8c2f518dff7 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
 if ps.msg != nil {
 msg := ps.msg
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
 ps.mu.Lock()
 defer ps.mu.Unlock()
 if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
 ps.msg = msg
 for sub := range ps.subscribers {
 s := sub
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
 ps.mu.Lock()
 defer ps.mu.Unlock()
 if !ps.subscribers[s] {
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 5d66539869232..7aae9240ffc07 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -183,7 +183,7 @@ var (
 // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
 // metadata to RPCs.
- GRPCResolverSchemeExtraMetadata string = "xds"
+ GRPCResolverSchemeExtraMetadata = "xds"
 // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
 EnterIdleModeForTesting any // func(*grpc.ClientConn)
@@ -203,11 +203,31 @@ var (
 // UserSetDefaultScheme is set to true if the user has overridden the
 // default resolver scheme.
- UserSetDefaultScheme bool = false
+ UserSetDefaultScheme = false
 // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
 // is the number of elements. swap swaps the elements with indexes i and j.
 ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
+
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
)

// HealthChecker defines the signature of the client-side LB channel health
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572ad55..b901c7bace506 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
 r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}

func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 0000000000000..fd33af51ae896
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ // could also append
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
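A usage sketch for this context plumbing, not part of the patch (istats aliases google.golang.org/grpc/internal/stats; the label key and value are illustrative): one side of an RPC stores a Labels pointer early, and later instrumentation fills it in place.

	// Early in the RPC: attach an empty Labels value to the context.
	ctx = istats.SetLabels(ctx, &istats.Labels{
		TelemetryLabels: map[string]string{},
	})

	// Later, e.g. in an LB policy: fill labels in place if present.
	if labels := istats.GetLabels(ctx); labels != nil {
		labels.TelemetryLabels["grpc.lb.locality"] = "region-a" // illustrative
	}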
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000000000..be110d41f9a42
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// A record call panics if the number of label values provided does not match
+// the number of label keys registered for the metric.
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metrics recorder list from the stats
+// handlers provided that implement the MetricsRecorder interface. If none of
+// the provided stats handlers implement MetricsRecorder, the returned list is
+// a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
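Tying the two new files together, a sketch that is not part of the patch: a component builds a MetricsRecorderList from its configured stats handlers and records through it. statsHandlers is a hypothetical []stats.Handler, and examplePicks is the single-label handle from the registry sketch earlier.

	// Handlers that do not implement estats.MetricsRecorder are skipped, so
	// the returned list degrades to a no-op when none qualify.
	mrl := istats.NewMetricsRecorderList(statsHandlers)

	// Exactly one label value per registered (and optional) label key, or
	// verifyLabels panics.
	mrl.RecordInt64Count(examplePicks, 1, "example.target")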
+ } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc82059525..757925381fe75 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd75bdb..54c24c2ff3865 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It is a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported. -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd705..7e7aaa5463683 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters.
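The MetricsRecorderList added above filters the configured stats handlers down to those that also implement the metrics-recorder interface, then fans every Record call out to all of them. A minimal, self-contained sketch of that filter-and-fan-out pattern (all names below are hypothetical stand-ins, not the gRPC API):

package main

import "fmt"

// recorder is a stand-in for estats.MetricsRecorder.
type recorder interface {
	RecordCount(name string, incr int64)
}

type printRecorder struct{ prefix string }

func (p printRecorder) RecordCount(name string, incr int64) {
	fmt.Printf("%s: %s += %d\n", p.prefix, name, incr)
}

// recorderList forwards every call to all underlying recorders, the way
// MetricsRecorderList fans out its Record* methods.
type recorderList struct{ recorders []recorder }

// newRecorderList keeps only the handlers that implement recorder, mirroring
// the type assertion in NewMetricsRecorderList.
func newRecorderList(handlers []any) *recorderList {
	var rs []recorder
	for _, h := range handlers {
		if r, ok := h.(recorder); ok {
			rs = append(rs, r)
		}
	}
	return &recorderList{recorders: rs}
}

func (l *recorderList) RecordCount(name string, incr int64) {
	for _, r := range l.recorders {
		r.RecordCount(name, incr)
	}
}

func main() {
	l := newRecorderList([]any{printRecorder{"otel"}, "not a recorder", printRecorder{"log"}})
	l.RecordCount("grpc.client.attempt.started", 1) // forwarded to both recorders
}

If no handler matches, the loop in RecordCount iterates over an empty slice, which is what makes the returned list a no-op.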
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907ba..d5c1085eeaecd 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20c9..ef72fbb3a0163 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. 
Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings, cleanupStreams, etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well.
c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return nil, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control.
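The rewritten controlBuffer pairs a mutex-guarded item list with a capacity-1 wakeup channel: a producer signals the channel only when a consumer has announced (via consumerWaiting) that it is blocked, and the buffered, non-blocking send coalesces redundant wakeups. A rough sketch of that handoff in isolation (hypothetical queue type; single consumer, no throttling or close handling):

package main

import (
	"fmt"
	"sync"
)

type queue struct {
	mu       sync.Mutex
	items    []string
	waiting  bool          // true while the consumer is blocked in get
	wakeupCh chan struct{} // capacity 1, so the send below never blocks
}

func newQueue() *queue { return &queue{wakeupCh: make(chan struct{}, 1)} }

func (q *queue) put(s string) {
	q.mu.Lock()
	q.items = append(q.items, s)
	wake := q.waiting
	q.waiting = false
	q.mu.Unlock()
	if wake {
		select {
		case q.wakeupCh <- struct{}{}:
		default: // a wakeup is already pending; no need for another
		}
	}
}

func (q *queue) get() string {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			item := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return item
		}
		q.waiting = true
		q.mu.Unlock()
		<-q.wakeupCh // release the lock first, then sleep until put signals
	}
}

func main() {
	q := newQueue()
	go q.put("hello")
	fmt.Println(q.get())
}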
type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h, which keeps the grpc-message header, and + // reader, which holds the actual message. As an optimization to keep wire + // traffic low, data from the reader is copied into h to make it as big as the + // maximum possible HTTP2 frame size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames.
- // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4e7..ce878693bd741 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
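Because http2.Framer cannot write a frame in pieces, processData above borrows a single buffer from the pool, copies the header plus as much message data as quota allows into it, and returns it with a deferred Put. A small sketch of that borrow-copy-return flow, assuming the mem package added later in this diff is importable (frameBytes and write are illustrative names, not gRPC APIs):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// frameBytes copies hdr and data into one buffer borrowed from pool, hands it
// to write, and then returns the buffer to the pool, mirroring the
// materialization step in loopyWriter.processData.
func frameBytes(pool mem.BufferPool, hdr, data []byte, write func([]byte)) {
	size := len(hdr) + len(data)
	buf := pool.Get(size) // *[]byte whose length is size
	defer pool.Put(buf)

	copy((*buf)[:len(hdr)], hdr)
	copy((*buf)[len(hdr):], data)
	write((*buf)[:size])
}

func main() {
	pool := mem.DefaultBufferPool()
	hdr := []byte{0, 0, 0, 0, 5} // 5-byte length-prefixed message header (illustrative)
	frameBytes(pool, hdr, []byte("hello"), func(b []byte) {
		fmt.Printf("would write a %d-byte DATA frame\n", len(b))
	})
}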
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986da..c769deab53c77 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits, with a timer, for the loopyWriter to be closed, to avoid + // blocking for a long time in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
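The client Close path above no longer blocks forever on <-t.writerDone: it bounds the wait for the GOAWAY write with a timer so a blackholed TCP connection cannot stall shutdown indefinitely. The select-plus-timer shape in isolation (a hedged sketch; awaitWithTimeout and its done channel are hypothetical):

package main

import (
	"fmt"
	"time"
)

// awaitWithTimeout waits for done to be closed, but gives up after timeout.
// It reports whether done fired in time, mirroring the bounded wait on
// t.writerDone in http2Client.Close.
func awaitWithTimeout(done <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop() // release the timer early if done wins
	select {
	case <-done:
		return true
	case <-timer.C:
		return false
	}
}

func main() {
	done := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(done) }()
	fmt.Println(awaitWithTimeout(done, 5*time.Second)) // true
}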
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b5013..584b50fe55302 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. A non-nil error // is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442eb..3613d7b64817d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574cb..54b2244365444 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. 
However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97c0..fdd6fa86cc15c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there is no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that +// error.
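The doHTTPConnectHandshake change above wraps the connection only when the bufio.Reader actually buffered bytes past the proxy's response; otherwise the raw conn is returned and every subsequent Read skips the wrapper. A standalone sketch of that decision (this bufConn is a local stand-in for the unexported gRPC type):

package main

import (
	"bufio"
	"io"
	"net"
)

// bufConn is a net.Conn whose reads go through r, so bytes the reader already
// buffered during the handshake are not lost.
type bufConn struct {
	net.Conn
	r io.Reader
}

func (c *bufConn) Read(b []byte) (int, error) { return c.r.Read(b) }

// maybeWrap returns conn unchanged when nothing is buffered; the wrapper is
// only needed if the handshake over-read into r.
func maybeWrap(conn net.Conn, r *bufio.Reader) net.Conn {
	if r.Buffered() == 0 {
		return conn
	}
	return &bufConn{Conn: conn, r: r}
}

func main() {
	c1, _ := net.Pipe()
	r := bufio.NewReader(c1)
	_ = maybeWrap(c1, r) // nothing buffered yet, so c1 comes back unwrapped
}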
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was that stream.Recv() may return a ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fixing the race. + // + // TODO: delaying ctx error seems like an unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster.
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after the stream has ended, that is, after either // read or write has finished. @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream.
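readClient and readHeaderClient above deliberately prefer data already queued in recv over a context error: on cancellation they mark the stream done via closeStream and then still drain one more message, so a trailer that raced in ahead of the cancellation is not lost. A distilled sketch of that select shape (msg, recvOrCancel, and the channel-backed closeStream are hypothetical):

package main

import (
	"context"
	"fmt"
)

type msg struct {
	data string
	err  error
}

// recvOrCancel prefers messages already queued in recv even when ctx has been
// canceled: on cancellation it marks the stream done (closeStream) and then
// still reads the next queued message, as recvBufferReader.readClient does.
func recvOrCancel(ctx context.Context, recv chan msg, closeStream func(error)) msg {
	select {
	case <-ctx.Done():
		closeStream(ctx.Err())
		return <-recv // closeStream enqueues the error if nothing else is queued
	case m := <-recv:
		return m
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	recv := make(chan msg, 1)
	closeStream := func(err error) { recv <- msg{err: err} }
	fmt.Println(recvOrCancel(ctx, recv, closeStream).err) // context canceled
}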
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err + } + t.windowHandler(len(header)) + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err } - t.windowHandler(n) - return + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to inform the application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d311..eb42b19fb99a1 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strict requirement for Time to be at least + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 0000000000000..c37c58c0233ec --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool.
+ Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewTieredBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note, however, that it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own; rather, it +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool.
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 0000000000000..228e9c2f20f26 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. 
+func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts of the system. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice.
+ r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 0000000000000..4d66b2ccc2be6 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. 
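Aside: a hedged round-trip sketch tying buffer_slice.go together. mem.NewWriter copies each Write into a pooled Buffer appended to a BufferSlice, and Reader() takes its own references and frees buffers as they are drained. The mem package is experimental, so the details may change.

```go
package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Writes are copied into buffers pulled from the pool.
	var bs mem.BufferSlice
	w := mem.NewWriter(&bs, pool)
	io.WriteString(w, "hello, ")
	io.WriteString(w, "world")

	fmt.Println("logical length:", bs.Len()) // 12 bytes across len(bs) buffers

	// Reader takes its own references; buffers are freed as they are read.
	r := bs.Reader()
	data, _ := io.ReadAll(r)
	r.Close()
	bs.Free() // release the writer's references as well
	fmt.Println(string(data))
}
```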
+ Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. 
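Aside: a small sketch of the reference-counting contract above. Every Ref must be balanced by a Free; the backing array returns to the pool only on the final Free, and any access after that panics. The 4KB payload is chosen to exceed the 1KB pooling threshold; the buffer's String method follows below.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// 4KB exceeds the 1KB pooling threshold, so Copy pulls a pooled buffer;
	// smaller payloads become a plain SliceBuffer with no-op Ref/Free.
	payload := make([]byte, 4<<10)
	buf := mem.Copy(payload, pool)

	buf.Ref() // e.g. hand a second reference to another goroutine
	fmt.Println("len:", buf.Len())
	_ = buf.ReadOnlyData() // valid while at least one reference is held

	buf.Free() // drops the extra reference; data is still readable
	buf.Free() // final reference: the backing array returns to the pool

	// Any further buf.ReadOnlyData()/Ref()/Free() here would panic.
}
```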
+func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd6e268..d2e15253bbfbc 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd63364335e..e87a17f36a50b 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. 
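Aside: with the Experimental notice dropped in the metadata.go hunk above, ValueFromIncomingContext is stable API. A minimal usage sketch (the key name is illustrative); lookups are case-insensitive even when the MD attached to the context was built without the helper functions.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Incoming metadata keys are matched case-insensitively, so a client
	// sending "X-Request-ID" is found under "x-request-id".
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("X-Request-ID", "abc-123"))

	if vals := metadata.ValueFromIncomingContext(ctx, "x-request-id"); len(vals) > 0 {
		fmt.Println("request id:", vals[0])
	}
}
```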
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c224c..0000000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) 
- -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5a9..09e864a89d35b 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236faf6..23bb3fb258240 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. 
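Aside: the manual-resolver reorder above closes a race where UpdateState or ReportError could run between BuildCallback and Build acquiring the lock. A hedged sketch of the usage pattern it protects; the scheme and addresses are illustrative.

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Seed state before Build; it is delivered once the channel builds us.
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
	})

	conn, err := grpc.NewClient("example:///svc",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// After the resolver has been built (e.g. once the first RPC forces the
	// channel out of idle), pushing updates is safe even if it overlaps with
	// Build, since BuildCallback now runs under r.mu:
	//
	//	r.UpdateState(resolver.State{
	//		Addresses: []resolver.Address{{Addr: "127.0.0.1:50052"}},
	//	})
}
```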
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e91510..2d96f1405e8da 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. 
The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
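Aside: ForceCodecV2, introduced in this hunk, accepts the new encoding.CodecV2, whose Marshal returns a mem.BufferSlice instead of a []byte. A hedged skeleton of a proto-backed CodecV2; pooling is skipped for clarity, though a production codec could return pooled buffers from Marshal. The Experimental notice for ForceCodecV2 continues below.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
	"google.golang.org/protobuf/proto"
)

// protoCodecV2 is a minimal encoding.CodecV2. Marshal hands back a
// BufferSlice, which lets real codecs return pooled memory to the stack.
type protoCodecV2 struct{}

func (protoCodecV2) Marshal(v any) (mem.BufferSlice, error) {
	m, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("not a proto.Message: %T", v)
	}
	b, err := proto.Marshal(m)
	if err != nil {
		return nil, err
	}
	// One unpooled buffer; mem.Copy with a BufferPool would reuse memory.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (protoCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
	m, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("not a proto.Message: %T", v)
	}
	// Flatten possibly-multiple buffers into one, then release it.
	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
	defer buf.Free()
	return proto.Unmarshal(buf.ReadOnlyData(), m)
}

func (protoCodecV2) Name() string { return "proto" }

var _ encoding.CodecV2 = protoCodecV2{}

func main() {
	// Per call: grpc.ForceCodecV2(protoCodecV2{})
	// Server-wide: grpc.ForceServerCodecV2(protoCodecV2{})
	fmt.Println(protoCodecV2{}.Name())
}
```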
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
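Aside: for reference, this is the 5-byte framing that recvMsg parses above and msgHeader (below) produces: one payload-format byte, then a big-endian uint32 payload length, per the gRPC HTTP/2 protocol doc. A standalone sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the gRPC length-prefixed message header:
// byte 0: payload format (0 = plain, 1 = compressed),
// bytes 1-4: big-endian length of the payload.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	msg := frame([]byte("abc"), false)
	fmt.Printf("% x\n", msg) // 00 00 00 00 03 61 62 63
}
```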
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
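Aside: the rewritten decompress above keeps the long-standing limit trick: copy through io.LimitReader capped at max+1 bytes, so a result longer than max proves the message exceeds the limit without buffering unbounded output. A standalone sketch using gzip; the 64-byte limit is illustrative.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// readLimited decompresses r but never buffers more than max+1 bytes; a
// result longer than max signals "too large" to the caller.
func readLimited(r io.Reader, max int) ([]byte, bool, error) {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return nil, false, err
	}
	out, err := io.ReadAll(io.LimitReader(zr, int64(max)+1))
	if err != nil {
		return nil, false, err
	}
	return out, len(out) > max, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(bytes.Repeat([]byte("a"), 100))
	zw.Close()

	data, tooBig, err := readLimited(&buf, 64)
	fmt.Println(len(data), tooBig, err) // 65 true <nil>
}
```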
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792bf15..d1e1415a40f9b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e256..0000000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
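
An aside on the file being deleted here: simpleSharedBufferPool bucketed buffers into sync.Pools capped at 16 B, 256 B, 4 KB, 64 KB and 1 MB, falling back to plain allocation above that. Below is a self-contained sketch of the same bucketing idea, with all names invented for illustration; the actual replacement in gRPC 1.67 is the internal google.golang.org/grpc/mem machinery, not this code:

```Go
package main

import (
	"fmt"
	"sync"
)

// bucketPool recycles byte slices in fixed capacity tiers, mirroring the
// level0..level4 buckets of the deleted simpleSharedBufferPool.
type bucketPool struct {
	sizes []int        // ascending tier capacities
	pools []*sync.Pool // one pool per tier
}

func newBucketPool(sizes []int) *bucketPool {
	p := &bucketPool{sizes: sizes}
	for _, s := range sizes {
		s := s // capture per-tier size for the New closure
		p.pools = append(p.pools, &sync.Pool{
			New: func() any { b := make([]byte, s); return &b },
		})
	}
	return p
}

// Get returns a slice of length n, reusing a pooled buffer when a tier is
// large enough; oversized requests fall back to a plain allocation.
func (p *bucketPool) Get(n int) []byte {
	for i, s := range p.sizes {
		if n <= s {
			b := p.pools[i].Get().(*[]byte)
			if cap(*b) < n { // an undersized buffer was parked in this tier
				p.pools[i].Put(b)
				return make([]byte, n)
			}
			return (*b)[:n]
		}
	}
	return make([]byte, n) // larger than every tier
}

// Put files a buffer by capacity into the smallest tier that fits it.
func (p *bucketPool) Put(b *[]byte) {
	for i, s := range p.sizes {
		if cap(*b) <= s {
			p.pools[i].Put(b)
			return
		}
	}
}

func main() {
	pool := newBucketPool([]int{16, 256, 4 << 10, 64 << 10, 1 << 20})
	buf := pool.Get(100) // served by the 256 B tier
	fmt.Println(len(buf), cap(buf))
	pool.Put(&buf)
}
```

The cap check in Get matters because Put files buffers by capacity, so a tier can legitimately hold buffers smaller than its nominal size; the deleted gRPC code guarded the same way.
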
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65182c5..71195c4943d7a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b514a3..bb2b2a216ce24 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
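
The replayOp pairing introduced above exists so that message buffers captured for transparent retry can be released the moment retries become impossible: either when the attempt commits or when the retry buffer budget is exceeded. A stripped-down sketch of that pattern, with no gRPC types and all names invented:

```Go
package main

import "fmt"

// replayedOp mirrors the shape of the replayOp struct above: an operation to
// re-run on retry plus an optional cleanup for resources it retains.
type replayedOp struct {
	run     func() error
	cleanup func()
}

type retryBuffer struct {
	ops       []replayedOp
	size, max int
	committed bool
}

// add buffers op for replay unless the budget is exceeded, in which case the
// attempt is committed and this op's resources are released immediately.
func (b *retryBuffer) add(cost int, run func() error, cleanup func()) {
	if b.committed {
		return
	}
	b.size += cost
	if b.size > b.max {
		b.commit()
		if cleanup != nil {
			cleanup()
		}
		return
	}
	b.ops = append(b.ops, replayedOp{run: run, cleanup: cleanup})
}

// commit gives up on retrying: every buffered op's cleanup runs and the
// buffer is dropped, like commitAttemptLocked in the hunk above.
func (b *retryBuffer) commit() {
	for _, op := range b.ops {
		if op.cleanup != nil {
			op.cleanup()
		}
	}
	b.ops, b.committed = nil, true
}

func main() {
	b := &retryBuffer{max: 8}
	b.add(4, func() error { return nil }, func() { fmt.Println("freed msg 1") })
	b.add(4, func() error { return nil }, func() { fmt.Println("freed msg 2") })
	b.commit() // runs both cleanups
}
```

This is why bufferForRetryLocked now takes a cleanup argument and invokes it at once when it decides to commit rather than buffer.
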
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
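
SendMsg now takes an extra payload.Ref() before handing the buffers to the retry machinery, and the deferred Free drops the original reference, so whichever side finishes last actually returns the memory to the pool. A toy reference-counted buffer in the same spirit; the real counting lives in google.golang.org/grpc/mem, and refBuffer here is invented:

```Go
package main

import (
	"fmt"
	"sync/atomic"
)

// refBuffer is a stand-in for a pooled, reference-counted buffer. Free
// releases the underlying storage only when the last reference is dropped.
type refBuffer struct {
	data []byte
	refs atomic.Int32
}

func newRefBuffer(b []byte) *refBuffer {
	r := &refBuffer{data: b}
	r.refs.Store(1)
	return r
}

func (r *refBuffer) Ref() { r.refs.Add(1) }

func (r *refBuffer) Free() {
	if r.refs.Add(-1) == 0 {
		fmt.Println("buffer released to pool")
		r.data = nil
	}
}

func main() {
	payload := newRefBuffer(make([]byte, 1024))
	payload.Ref()  // extra ref: keeps payload alive if buffered for retry
	payload.Free() // the deferred free of the original reference
	payload.Free() // retry path done with it: storage actually released
}
```
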
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0cc5..0037fee0bd71a 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. 
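
Seen from the caller, the contract documented above is: Send in a loop, treat io.EOF as "status pending", then exactly one CloseAndRecv. A sketch against a hand-written stand-in for generated code; SumRequest, SumResponse and fakeSum are invented for illustration:

```Go
package main

import (
	"errors"
	"fmt"
	"io"
)

type SumRequest struct{ N int64 }
type SumResponse struct{ Total int64 }

// clientStreamingClient mirrors the Send/CloseAndRecv half of the
// ClientStreamingClient interface documented above.
type clientStreamingClient[Req, Res any] interface {
	Send(*Req) error
	CloseAndRecv() (*Res, error)
}

// sum sends every value, then calls CloseAndRecv exactly once. io.EOF from
// Send means the stream already ended; the real status surfaces from
// CloseAndRecv, per the doc comments in the hunk above.
func sum(stream clientStreamingClient[SumRequest, SumResponse], ns []int64) (int64, error) {
	for _, n := range ns {
		if err := stream.Send(&SumRequest{N: n}); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return 0, err
		}
	}
	res, err := stream.CloseAndRecv()
	if err != nil {
		return 0, err
	}
	return res.Total, nil
}

// fakeSum is a trivial in-memory stand-in for a generated client stream.
type fakeSum struct{ total int64 }

func (f *fakeSum) Send(r *SumRequest) error { f.total += r.N; return nil }
func (f *fakeSum) CloseAndRecv() (*SumResponse, error) {
	return &SumResponse{Total: f.total}, nil
}

func main() {
	got, _ := sum(&fakeSum{}, []int64{1, 2, 3})
	fmt.Println(got) // 6
}
```
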
ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. 
The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bafaef99be989..187fbf1195141 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.65.0" +const Version = "1.67.0" diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 0000000000000..7348c50c0c3d7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
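
Returning to the streaming interfaces documented earlier in this patch: the BidiStreamingServer comments prescribe a handler loop that treats io.EOF from Recv as a clean client half-close and returns nil for an OK status. A minimal echo handler built only on that documented shape; EchoRequest, EchoResponse, bidiStream and fakeStream are invented stand-ins for generated code:

```Go
package main

import (
	"errors"
	"fmt"
	"io"
)

type EchoRequest struct{ Msg string }
type EchoResponse struct{ Msg string }

// bidiStream mirrors the Recv/Send half of BidiStreamingServer.
type bidiStream interface {
	Recv() (*EchoRequest, error)
	Send(*EchoResponse) error
}

// echo follows the documented loop: io.EOF from Recv means the client
// called CloseSend, so returning nil yields an OK status; any other error
// ends the stream.
func echo(stream bidiStream) error {
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		if err := stream.Send(&EchoResponse{Msg: req.Msg}); err != nil {
			return err
		}
	}
}

// fakeStream feeds two requests, then EOF, and prints what is sent back.
type fakeStream struct{ reqs []string }

func (f *fakeStream) Recv() (*EchoRequest, error) {
	if len(f.reqs) == 0 {
		return nil, io.EOF
	}
	r := &EchoRequest{Msg: f.reqs[0]}
	f.reqs = f.reqs[1:]
	return r, nil
}

func (f *fakeStream) Send(res *EchoResponse) error {
	fmt.Println("sent:", res.Msg)
	return nil
}

func main() {
	_ = echo(&fakeStream{reqs: []string{"hello", "world"}})
}
```
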
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000000000..8da58fbf6f84a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 0000000000000..866d74a7ad791 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000000000..b50c6e8775594 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000000000..acf71402cf31a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
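
The read-handler indirection just above (yaml_string_read_handler vs yaml_reader_read_handler) is what backs both yaml.Unmarshal on a byte slice and, if I read the v2 public API correctly, the streaming yaml.NewDecoder over an io.Reader, which also covers the multi-document case that plain Unmarshal does not. A small sketch against that public surface:

```Go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"gopkg.in/yaml.v2"
)

// Two YAML documents in one stream; the reader-based input path above is
// what lets the decoder consume them incrementally.
const docs = `name: first
---
name: second
`

type item struct {
	Name string `yaml:"name"`
}

func main() {
	dec := yaml.NewDecoder(strings.NewReader(docs))
	for {
		var it item
		if err := dec.Decode(&it); err == io.EOF {
			break
		} else if err != nil {
			log.Fatalf("decode: %v", err)
		}
		fmt.Println(it.Name)
	}
}
```

This sticks to the exported yaml.v2 surface and does not touch the unexported parser internals shown in the hunk.
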
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000000000..129bc2a97d317 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
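+//
+// A sketch for orientation (editor's note, not upstream code): a document such
+// as "a: [1, 2]" arrives from the libyaml port as the event sequence
+//
+//	STREAM-START, DOCUMENT-START,
+//	  MAPPING-START, SCALAR("a"), SEQUENCE-START,
+//	    SCALAR("1"), SCALAR("2"),
+//	  SEQUENCE-END, MAPPING-END,
+//	DOCUMENT-END, STREAM-END
+//
+// which the parser below folds into a *node tree:
+//
+//	p := newParser([]byte("a: [1, 2]"))
+//	defer p.destroy()
+//	root := p.parse() // documentNode wrapping the mapping
+//
+// newParser and parse are the unexported helpers defined below; the decoder
+// further down walks the resulting tree to populate Go values.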
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
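+		// (newParser pads a zero-length buffer to "\n", which still yields
+		// no document events, so callers treat a nil node as "no content".)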
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
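+// Nil pointers in out are allocated and dereferenced in a loop, so pointers
+// of any depth work, and UnmarshalYAML is looked up on the addressable value.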
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
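+		// (d.aliases acts as a visited set for the expansion stack: seeing
+		// an anchor again before its expansion has finished means the node
+		// graph is cyclic, which plain Go values cannot represent.)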
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
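+			// (Decoding into a concrete time.Time destination still yields
+			// a time.Time via the exact-type match above; only the
+			// interface{} path keeps the string form.)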
+			out.Set(reflect.ValueOf(n.value))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			return true
+		}
+	}
+	d.terror(n, tag, out)
+	return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
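+		// (A []interface{} is decoded first and assigned back to the
+		// interface via iface.Set at the end of the function.)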
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000000000..a1c2cc52627f0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
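+// A rune may occupy one to four bytes; width() reports the count, and both
+// the buffer position and the source index *i advance by it, while column
+// advances by one.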
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
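+	// (Editor's note: the copy is still defensive in Go, since the caller
+	// may reuse or mutate the backing arrays of value.handle and
+	// value.prefix while the directive stays in emitter.tag_directives.)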
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
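+// This also pins down the emitter defaults before any output: the encoding
+// (UTF-8 unless specified), an indent clamped to 2..9, a best width of 80
+// (negative means unlimited), and the line-break style.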
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
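+// Keys that qualify as simple keys (single line, at most 128 bytes including
+// anchor and tag) are emitted inline; anything else uses the explicit '?'
+// indicator form.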
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
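+// The layout is decided here: an enclosing flow context, canonical mode, an
+// explicit flow style on the event, or an empty sequence all select flow
+// style; otherwise block style is used.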
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
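+// Selection degrades stepwise: plain falls back to single-quoted when the
+// context or content disallows it, single-quoted falls back to double-quoted,
+// and the block styles (literal, folded) fall back to double-quoted inside
+// flow context or simple keys. Canonical output and multiline simple keys
+// force double quotes.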
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
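+// Besides validity, this records which styles the value permits: leading or
+// trailing whitespace, space/break runs, line breaks, special characters and
+// indicator characters each rule out some of the plain, single-quoted and
+// block styles consumed by yaml_emitter_select_scalar_style.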
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
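+ // NOTE: is_blankz also accepts the NUL "z" terminator inherited from
+ // libyaml's C strings. Inside this loop i always indexes a real byte, so
+ // for values free of raw NUL bytes (those are caught as special_characters
+ // above anyway) is_blankz behaves the same as is_blank || is_break, and
+ // the "z" variant appears harmless here.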
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
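
The flags computed by yaml_emitter_analyze_scalar surface directly in the output: leading or trailing blanks forbid the plain and block styles, and unprintable bytes leave double quotes as the only style that can escape them. Another short sketch against the public API, not part of the vendored file:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	for _, s := range []string{
		" padded ", // leading/trailing space: expect v: ' padded '
		"a\x01b",   // unprintable byte: expect v: "a\x01b"
	} {
		out, err := yaml.Marshal(map[string]string{"v": s})
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}
}
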
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000000000..0ee738e11b673 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + 
"reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 0000000000000..81d05dfe573f9 --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
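
Taken together, the productions above describe a pushdown automaton: parser.state names the production currently being matched, and parser.states is its return stack. As a hand-worked illustration (simplified, not produced by running this code), parsing the document `a: [b, c]` emits:

    STREAM-START
      DOCUMENT-START        (implicit: no leading ---)
        MAPPING-START       (block style)
          SCALAR "a"        (key)
          SEQUENCE-START    (flow style, the value)
            SCALAR "b"
            SCALAR "c"
          SEQUENCE-END
        MAPPING-END
      DOCUMENT-END          (implicit: no trailing ...)
    STREAM-END
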
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
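
Several branches above synthesize a key or value when the input omits one; they all funnel into yaml_parser_process_empty_scalar below, and the zero-width plain scalar it produces resolves to null. The effect is visible from the public API (a sketch, not part of the vendored file):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var m map[string]interface{}
	if err := yaml.Unmarshal([]byte("{a: , b: 1}"), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%#v %#v\n", m["a"], m["b"]) // prints: <nil> 1
}
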
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 0000000000000..7c1f5fac3dbd2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
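
The three signatures declared just below drive the encoding sniff in yaml_parser_determine_encoding; absent a BOM, UTF-8 is assumed. A standalone sketch of that check (the helper name sniff is ours):

package main

import (
	"bytes"
	"fmt"
)

func sniff(b []byte) string {
	switch {
	case bytes.HasPrefix(b, []byte("\xff\xfe")):
		return "UTF-16LE"
	case bytes.HasPrefix(b, []byte("\xfe\xff")):
		return "UTF-16BE"
	case bytes.HasPrefix(b, []byte("\xef\xbb\xbf")):
		return "UTF-8 (BOM skipped)"
	default:
		return "UTF-8 (assumed)"
	}
}

func main() {
	fmt.Println(sniff([]byte("\xef\xbb\xbfa: 1"))) // UTF-8 (BOM skipped)
	fmt.Println(sniff([]byte("a: 1")))             // UTF-8 (assumed)
}
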
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
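+ // (Determining the encoding may consume a leading BOM, so the decoding
+ // loop below starts at actual document content.)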
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
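+ // (This rejects overlong encodings: for instance, C0 80 would decode to
+ // U+0000 in two bytes, which UTF-8 forbids in favor of the shortest form,
+ // the single byte 00.)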
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
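+ // (Both advance by the width of the character in the source encoding,
+ // i.e. in input bytes, not in decoded characters.)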
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 0000000000000..4120e0c9160a1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
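+ // For instance, a plain 2015-02-24 resolves to !!timestamp here, while
+ // the same text in quotes reaches resolve with the !!str tag and stays
+ // a string.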
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
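+ // (Scan the leading run of digits, then require exactly four of them
+ // followed by '-'; anything else cannot match any of the formats above.)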
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000000000..0b9bb6030a0fa
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever";
+// the rest is quite straightforward. The issues are "block collection start"
+// and "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ?
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
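+ // (Comments never surface as tokens; they are consumed here. In the
+ // block context, consuming a line break also re-enables simple keys.)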
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
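+//
+// For example, while scanning the block mapping line
+//
+//	foo: bar
+//
+// "foo" is a simple key: only its position is recorded here, and the KEY
+// token is inserted retroactively once the following ':' is seen (see
+// yaml_parser_fetch_value below).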
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
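+ // (number == -1 appends at the tail of the queue; a non-negative number
+ // inserts at a fixed position, which lets yaml_parser_fetch_value slot a
+ // BLOCK-MAPPING-START in front of a saved simple key.)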
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
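+ // ('---' and '...' are exactly three characters long, hence the three
+ // skips below.)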
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
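+ // (Only ':' can terminate a simple key, so a pending candidate cannot
+ // survive a '-' indicator and is dropped here.)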
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
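+ // For example, a tab used as block indentation, as in
+ //
+ //	key:
+ //	<tab>nested: value
+ //
+ // is not consumed here; the scanner later reports an error at the tab
+ // instead of treating "nested" as an indented key.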
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
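+		// A verbatim tag has the form "!<tag:yaml.org,2002:str>"; the URI
+		// itself was consumed by yaml_parser_scan_tag_uri above, so the
+		// next character must be the closing '>'.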
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
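+	//
+	// A '%' introduces a URI-escaped octet, e.g. "%20" for a space, which
+	// yaml_parser_scan_uri_escapes below decodes into the raw byte.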
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
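+		// The chomping indicator controls the trailing breaks:
+		// "|+" (keep, +1) keeps the final break and trailing empty lines,
+		// "|-" (strip, -1) drops the final break too, and a bare "|" or ">"
+		// (clip, 0) keeps exactly one final break.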
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
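+		// (For a folded scalar the single break between two non-blank
+		// lines was replaced by a space above; runs of blank lines are
+		// preserved here as literal breaks.)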
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
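+					// Escapes in the surrogate range U+D800-U+DFFF, or
+					// above U+10FFFF, are not valid Unicode scalar values;
+					// everything else is re-encoded below as one to four
+					// UTF-8 bytes.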
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
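+	// If the scalar ended at a line break, the scanner now sits at the
+	// start of a fresh line in the block context, where a new simple key
+	// may begin, e.g.:
+	//
+	//	a: b
+	//	c: d   # 'c' may start a key only because this flag was set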
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000000000..4c45e660a8f2e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000000000..a2dde608cb7a3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
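+// On success the buffer is emptied so emission can continue; on failure the
+// emitter is left in the yaml_WRITER_ERROR state set by
+// yaml_emitter_set_writer_error above.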
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000000000..30813884c0679
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
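+//
+// As an illustration (with hypothetical types), given
+//
+//     type Inner struct{ B int }
+//     type Outer struct {
+//         A     int
+//         Inner `yaml:",inline"`
+//     }
+//
+// Marshal(&Outer{A: 1, Inner: Inner{B: 2}}) produces "a: 1\nb: 2\n".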
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
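+//
+// A type picks this behavior up simply by having the method; for example, a
+// hypothetical wrapper such as
+//
+//     type Stamp struct{ time.Time }
+//
+// already satisfies IsZeroer through the embedded time.Time.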
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000000000..f6a9c8e34b1ef
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
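+
+	// As an illustration, the document "a: 1" scans into the tokens
+	// STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR("a"), VALUE,
+	// SCALAR("1"), BLOCK-END, STREAM-END.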
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
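+
+	// As an illustration, the document "a: 1" parses into the events
+	// STREAM-START, DOCUMENT-START, MAPPING-START, SCALAR("a"),
+	// SCALAR("1"), MAPPING-END, DOCUMENT-END, STREAM-END.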
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
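+// Each item holds the id of a child node, i.e. a 1-based index into the
+// owning yaml_document_t's nodes slice.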
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value []byte // The scalar value.
+		length int // The length of the scalar value.
+		style yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for yaml_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t // The stack of sequence items.
+		style yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t // The beginning of the stack.
+		pairs_end *yaml_node_pair_t // The end of the stack.
+		pairs_top *yaml_node_pair_t // The top of the stack.
+		style yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark yaml_mark_t // The end of the node.
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible bool // Is a simple key possible?
+	required bool // Is a simple key required?
+	token_number int // The number of the token.
+	mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte // The anchor.
+	index int // The node id.
+	mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value int
+	problem_mark yaml_mark_t
+
+	// The error context.
+	context string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input []byte // String input data.
+	input_pos int
+
+	eof bool // EOF flag
+
+	buffer []byte // The working buffer.
+	buffer_pos int // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int // The offset of the current position (in bytes).
+	mark yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens []yaml_token_t // The tokens queue.
+	tokens_head int // The head of the tokens queue.
+	tokens_parsed int // The number of tokens fetched from the queue.
+	token_available bool // Does the tokens queue contain a token ready for dequeueing?
+
+	indent int // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool // May a simple key occur at the current position?
+	simple_keys []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+	// Parser stuff
+
+	state yaml_parser_state_t // The current parser state.
+	states []yaml_parser_state_t // The parser states stack.
+	marks []yaml_mark_t // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to application data specified by
+// yaml_emitter_set_output().
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+		multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int // The number of references.
+		anchor int // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000000000..8110ce3c37a6b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size = 16
+	initial_queue_size = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
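+// The three bytes 0xEF 0xBB 0xBF are the UTF-8 encoding of U+FEFF. Note
+// the check is anchored at the start of the buffer; the index argument is
+// accepted only for symmetry with the other predicates and is unused.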
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
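+	// A UTF-8 lead byte encodes the total sequence length in its high
+	// bits: 0xxxxxxx is a 1-byte run, 110xxxxx starts a 2-byte run,
+	// 1110xxxx a 3-byte run, and 11110xxx a 4-byte run. Anything else,
+	// such as a 10xxxxxx continuation byte, falls through to the 0 below.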
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1502d1f6e62fd..85f92519f0c0f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -9,6 +9,9 @@ cel.dev/expr ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm +# github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e +## explicit +github.com/Azure/go-ntlmssp # github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab ## explicit github.com/JeffAshton/win_pdh @@ -143,9 +146,15 @@ github.com/fsnotify/fsnotify # github.com/fxamacker/cbor/v2 v2.7.0 ## explicit; go 1.17 github.com/fxamacker/cbor/v2 +# github.com/go-asn1-ber/asn1-ber v1.5.4 +## explicit; go 1.13 +github.com/go-asn1-ber/asn1-ber # github.com/go-errors/errors v1.4.2 ## explicit; go 1.14 github.com/go-errors/errors +# github.com/go-ldap/ldap/v3 v3.4.3 +## explicit; go 1.13 +github.com/go-ldap/ldap/v3 # github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr @@ -420,7 +429,9 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.21.0 +# github.com/onsi/ginkgo/v2 v2.20.1 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 +## explicit; go 1.22.0 +# github.com/onsi/ginkgo/v2 v2.20.2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 ## explicit; go 1.22.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -480,6 +491,244 @@ github.com/opencontainers/runtime-spec/specs-go github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalkdir +# github.com/openshift-eng/openshift-tests-extension v0.0.0-20241121212100-2e43ae5f86e2 +## explicit; go 1.22.0 +github.com/openshift-eng/openshift-tests-extension/pkg/cmd +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate +github.com/openshift-eng/openshift-tests-extension/pkg/dbtime +github.com/openshift-eng/openshift-tests-extension/pkg/extension +github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests +github.com/openshift-eng/openshift-tests-extension/pkg/flags +github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo +github.com/openshift-eng/openshift-tests-extension/pkg/version +# github.com/openshift/api v0.0.0-20241212053709-6b333900129e => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e +## explicit; go 1.23.0 +github.com/openshift/api/apiserver/v1 +github.com/openshift/api/apps/v1 +github.com/openshift/api/authorization/v1 +github.com/openshift/api/build/v1 +github.com/openshift/api/config/v1 +github.com/openshift/api/config/v1alpha1 +github.com/openshift/api/features +github.com/openshift/api/image/docker10 +github.com/openshift/api/image/dockerpre012 +github.com/openshift/api/image/v1 +github.com/openshift/api/kubecontrolplane/v1 +github.com/openshift/api/network/v1 +github.com/openshift/api/network/v1alpha1 +github.com/openshift/api/oauth/v1 +github.com/openshift/api/operator/v1 +github.com/openshift/api/osin/v1 +github.com/openshift/api/pkg/serialization 
+github.com/openshift/api/project/v1 +github.com/openshift/api/quota/v1 +github.com/openshift/api/route/v1 +github.com/openshift/api/security +github.com/openshift/api/security/v1 +github.com/openshift/api/template/v1 +github.com/openshift/api/user/v1 +# github.com/openshift/apiserver-library-go v0.0.0-20241212055705-41777f979e50 => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50 +## explicit; go 1.23.0 +github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy +github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1 +github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/validation +github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators +github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/rules +github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota +github.com/openshift/apiserver-library-go/pkg/authorization/scope +github.com/openshift/apiserver-library-go/pkg/configflags +github.com/openshift/apiserver-library-go/pkg/labelselector +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/capabilities +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/group +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/seccomp +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sysctl +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util +github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort +# github.com/openshift/client-go v0.0.0-20241212054934-9d86edf6d385 => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385 +## explicit; go 1.23.0 +github.com/openshift/client-go/apiserver/applyconfigurations +github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1 +github.com/openshift/client-go/apiserver/applyconfigurations/internal +github.com/openshift/client-go/apiserver/clientset/versioned +github.com/openshift/client-go/apiserver/clientset/versioned/fake +github.com/openshift/client-go/apiserver/clientset/versioned/scheme +github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1 +github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1/fake +github.com/openshift/client-go/apps/applyconfigurations/apps/v1 +github.com/openshift/client-go/apps/applyconfigurations/internal +github.com/openshift/client-go/apps/clientset/versioned +github.com/openshift/client-go/apps/clientset/versioned/scheme +github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1 +github.com/openshift/client-go/apps/informers/externalversions +github.com/openshift/client-go/apps/informers/externalversions/apps +github.com/openshift/client-go/apps/informers/externalversions/apps/v1 +github.com/openshift/client-go/apps/informers/externalversions/internalinterfaces +github.com/openshift/client-go/apps/listers/apps/v1 +github.com/openshift/client-go/authorization/applyconfigurations +github.com/openshift/client-go/authorization/applyconfigurations/authorization/v1 
+github.com/openshift/client-go/authorization/applyconfigurations/internal +github.com/openshift/client-go/authorization/clientset/versioned +github.com/openshift/client-go/authorization/clientset/versioned/fake +github.com/openshift/client-go/authorization/clientset/versioned/scheme +github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1 +github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake +github.com/openshift/client-go/authorization/informers/externalversions +github.com/openshift/client-go/authorization/informers/externalversions/authorization +github.com/openshift/client-go/authorization/informers/externalversions/authorization/v1 +github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces +github.com/openshift/client-go/authorization/listers/authorization/v1 +github.com/openshift/client-go/build/applyconfigurations/build/v1 +github.com/openshift/client-go/build/applyconfigurations/internal +github.com/openshift/client-go/build/clientset/versioned +github.com/openshift/client-go/build/clientset/versioned/scheme +github.com/openshift/client-go/build/clientset/versioned/typed/build/v1 +github.com/openshift/client-go/build/informers/externalversions +github.com/openshift/client-go/build/informers/externalversions/build +github.com/openshift/client-go/build/informers/externalversions/build/v1 +github.com/openshift/client-go/build/informers/externalversions/internalinterfaces +github.com/openshift/client-go/build/listers/build/v1 +github.com/openshift/client-go/config/applyconfigurations +github.com/openshift/client-go/config/applyconfigurations/config/v1 +github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 +github.com/openshift/client-go/config/applyconfigurations/internal +github.com/openshift/client-go/config/clientset/versioned +github.com/openshift/client-go/config/clientset/versioned/fake +github.com/openshift/client-go/config/clientset/versioned/scheme +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1 +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1 +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake +github.com/openshift/client-go/config/informers/externalversions +github.com/openshift/client-go/config/informers/externalversions/config +github.com/openshift/client-go/config/informers/externalversions/config/v1 +github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1 +github.com/openshift/client-go/config/informers/externalversions/internalinterfaces +github.com/openshift/client-go/config/listers/config/v1 +github.com/openshift/client-go/config/listers/config/v1alpha1 +github.com/openshift/client-go/image/applyconfigurations/image/v1 +github.com/openshift/client-go/image/applyconfigurations/internal +github.com/openshift/client-go/image/clientset/versioned +github.com/openshift/client-go/image/clientset/versioned/scheme +github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 +github.com/openshift/client-go/image/informers/externalversions +github.com/openshift/client-go/image/informers/externalversions/image +github.com/openshift/client-go/image/informers/externalversions/image/v1 +github.com/openshift/client-go/image/informers/externalversions/internalinterfaces +github.com/openshift/client-go/image/listers/image/v1 
+github.com/openshift/client-go/network/applyconfigurations/internal +github.com/openshift/client-go/network/applyconfigurations/network/v1 +github.com/openshift/client-go/network/applyconfigurations/network/v1alpha1 +github.com/openshift/client-go/network/clientset/versioned +github.com/openshift/client-go/network/clientset/versioned/scheme +github.com/openshift/client-go/network/clientset/versioned/typed/network/v1 +github.com/openshift/client-go/network/clientset/versioned/typed/network/v1alpha1 +github.com/openshift/client-go/network/informers/externalversions +github.com/openshift/client-go/network/informers/externalversions/internalinterfaces +github.com/openshift/client-go/network/informers/externalversions/network +github.com/openshift/client-go/network/informers/externalversions/network/v1 +github.com/openshift/client-go/network/informers/externalversions/network/v1alpha1 +github.com/openshift/client-go/network/listers/network/v1 +github.com/openshift/client-go/network/listers/network/v1alpha1 +github.com/openshift/client-go/oauth/applyconfigurations/internal +github.com/openshift/client-go/oauth/applyconfigurations/oauth/v1 +github.com/openshift/client-go/oauth/clientset/versioned +github.com/openshift/client-go/oauth/clientset/versioned/scheme +github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1 +github.com/openshift/client-go/oauth/informers/externalversions +github.com/openshift/client-go/oauth/informers/externalversions/internalinterfaces +github.com/openshift/client-go/oauth/informers/externalversions/oauth +github.com/openshift/client-go/oauth/informers/externalversions/oauth/v1 +github.com/openshift/client-go/oauth/listers/oauth/v1 +github.com/openshift/client-go/quota/applyconfigurations/internal +github.com/openshift/client-go/quota/applyconfigurations/quota/v1 +github.com/openshift/client-go/quota/clientset/versioned +github.com/openshift/client-go/quota/clientset/versioned/scheme +github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1 +github.com/openshift/client-go/quota/informers/externalversions +github.com/openshift/client-go/quota/informers/externalversions/internalinterfaces +github.com/openshift/client-go/quota/informers/externalversions/quota +github.com/openshift/client-go/quota/informers/externalversions/quota/v1 +github.com/openshift/client-go/quota/listers/quota/v1 +github.com/openshift/client-go/route/applyconfigurations/internal +github.com/openshift/client-go/route/applyconfigurations/route/v1 +github.com/openshift/client-go/route/clientset/versioned +github.com/openshift/client-go/route/clientset/versioned/scheme +github.com/openshift/client-go/route/clientset/versioned/typed/route/v1 +github.com/openshift/client-go/route/informers/externalversions +github.com/openshift/client-go/route/informers/externalversions/internalinterfaces +github.com/openshift/client-go/route/informers/externalversions/route +github.com/openshift/client-go/route/informers/externalversions/route/v1 +github.com/openshift/client-go/route/listers/route/v1 +github.com/openshift/client-go/security/applyconfigurations/internal +github.com/openshift/client-go/security/applyconfigurations/security/v1 +github.com/openshift/client-go/security/clientset/versioned +github.com/openshift/client-go/security/clientset/versioned/scheme +github.com/openshift/client-go/security/clientset/versioned/typed/security/v1 +github.com/openshift/client-go/security/informers/externalversions 
+github.com/openshift/client-go/security/informers/externalversions/internalinterfaces +github.com/openshift/client-go/security/informers/externalversions/security +github.com/openshift/client-go/security/informers/externalversions/security/v1 +github.com/openshift/client-go/security/listers/security/v1 +github.com/openshift/client-go/template/applyconfigurations/internal +github.com/openshift/client-go/template/applyconfigurations/template/v1 +github.com/openshift/client-go/template/clientset/versioned +github.com/openshift/client-go/template/clientset/versioned/scheme +github.com/openshift/client-go/template/clientset/versioned/typed/template/v1 +github.com/openshift/client-go/template/informers/externalversions +github.com/openshift/client-go/template/informers/externalversions/internalinterfaces +github.com/openshift/client-go/template/informers/externalversions/template +github.com/openshift/client-go/template/informers/externalversions/template/v1 +github.com/openshift/client-go/template/listers/template/v1 +github.com/openshift/client-go/user/applyconfigurations +github.com/openshift/client-go/user/applyconfigurations/internal +github.com/openshift/client-go/user/applyconfigurations/user/v1 +github.com/openshift/client-go/user/clientset/versioned +github.com/openshift/client-go/user/clientset/versioned/fake +github.com/openshift/client-go/user/clientset/versioned/scheme +github.com/openshift/client-go/user/clientset/versioned/typed/user/v1 +github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake +github.com/openshift/client-go/user/informers/externalversions +github.com/openshift/client-go/user/informers/externalversions/internalinterfaces +github.com/openshift/client-go/user/informers/externalversions/user +github.com/openshift/client-go/user/informers/externalversions/user/v1 +github.com/openshift/client-go/user/listers/user/v1 +# github.com/openshift/library-go v0.0.0-20241212055402-9dbaddb63ab9 => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9 +## explicit; go 1.23.0 +github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting +github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig +github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout +github.com/openshift/library-go/pkg/apiserver/apiserverconfig +github.com/openshift/library-go/pkg/authorization/authorizationutil +github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer +github.com/openshift/library-go/pkg/authorization/scopemetadata +github.com/openshift/library-go/pkg/client/openshiftrestmapper +github.com/openshift/library-go/pkg/config/client +github.com/openshift/library-go/pkg/config/configdefaults +github.com/openshift/library-go/pkg/config/helpers +github.com/openshift/library-go/pkg/config/validation +github.com/openshift/library-go/pkg/crypto +github.com/openshift/library-go/pkg/image/imageutil +github.com/openshift/library-go/pkg/image/internal/digest +github.com/openshift/library-go/pkg/image/internal/reference +github.com/openshift/library-go/pkg/image/reference +github.com/openshift/library-go/pkg/monitor/health +github.com/openshift/library-go/pkg/network +github.com/openshift/library-go/pkg/oauth/oauthdiscovery +github.com/openshift/library-go/pkg/quota/clusterquotamapping +github.com/openshift/library-go/pkg/quota/quotautil +github.com/openshift/library-go/pkg/route +github.com/openshift/library-go/pkg/route/defaulting +github.com/openshift/library-go/pkg/route/hostassignment 
+github.com/openshift/library-go/pkg/route/validation
+github.com/openshift/library-go/pkg/security/ldaputil
+github.com/openshift/library-go/pkg/security/uid
 # github.com/peterbourgon/diskv v2.0.1+incompatible
 ## explicit
 github.com/peterbourgon/diskv
@@ -732,6 +981,9 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
+# go.uber.org/atomic v1.7.0
+## explicit; go 1.13
+go.uber.org/atomic
 # go.uber.org/goleak v1.3.0
 ## explicit; go 1.20
 go.uber.org/goleak
@@ -765,6 +1017,7 @@ golang.org/x/crypto/ed25519
 golang.org/x/crypto/hkdf
 golang.org/x/crypto/internal/alias
 golang.org/x/crypto/internal/poly1305
+golang.org/x/crypto/md4
 golang.org/x/crypto/nacl/secretbox
 golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/salsa20/salsa
@@ -880,16 +1133,22 @@ golang.org/x/tools/internal/versions
 google.golang.org/genproto/protobuf/field_mask
 # google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7
 ## explicit; go 1.21
+# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f
+## explicit; go 1.21
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/expr/v1alpha1
 google.golang.org/genproto/googleapis/api/httpbody
 # google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7
 ## explicit; go 1.21
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61
+## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.65.0
 ## explicit; go 1.21
+# google.golang.org/grpc v1.67.0
+## explicit; go 1.21
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -907,7 +1166,9 @@ google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
 google.golang.org/grpc/health
 google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
@@ -931,11 +1192,13 @@ google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
 google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
 google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
@@ -1006,32 +1269,35 @@ gopkg.in/square/go-jose.v2
 gopkg.in/square/go-jose.v2/cipher
 gopkg.in/square/go-jose.v2/json
 gopkg.in/square/go-jose.v2/jwt
+# gopkg.in/yaml.v2 v2.4.0
+## explicit; go 1.15
+gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.0.0 => ./staging/src/k8s.io/api
+# k8s.io/api v0.32.0 => ./staging/src/k8s.io/api
 ## explicit; go 1.23.0
-# k8s.io/apiextensions-apiserver v0.0.0 => ./staging/src/k8s.io/apiextensions-apiserver
+# k8s.io/apiextensions-apiserver v0.32.0 => ./staging/src/k8s.io/apiextensions-apiserver
 ## explicit; go 1.23.0
-# k8s.io/apimachinery v0.0.0 => ./staging/src/k8s.io/apimachinery
+# k8s.io/apimachinery v0.32.0 => ./staging/src/k8s.io/apimachinery
 ## explicit; go 1.23.0
-# k8s.io/apiserver v0.0.0 => ./staging/src/k8s.io/apiserver
+# k8s.io/apiserver v0.32.0 => ./staging/src/k8s.io/apiserver
 ## explicit; go 1.23.0
 # k8s.io/cli-runtime v0.0.0 => ./staging/src/k8s.io/cli-runtime
 ## explicit; go 1.23.0
-# k8s.io/client-go v0.0.0 => ./staging/src/k8s.io/client-go
+# k8s.io/client-go v0.32.0 => ./staging/src/k8s.io/client-go
 ## explicit; go 1.23.0
 # k8s.io/cloud-provider v0.0.0 => ./staging/src/k8s.io/cloud-provider
 ## explicit; go 1.23.0
 # k8s.io/cluster-bootstrap v0.0.0 => ./staging/src/k8s.io/cluster-bootstrap
 ## explicit; go 1.23.0
-# k8s.io/code-generator v0.0.0 => ./staging/src/k8s.io/code-generator
+# k8s.io/code-generator v0.32.0 => ./staging/src/k8s.io/code-generator
 ## explicit; go 1.23.0
-# k8s.io/component-base v0.0.0 => ./staging/src/k8s.io/component-base
+# k8s.io/component-base v0.32.0 => ./staging/src/k8s.io/component-base
 ## explicit; go 1.23.0
-# k8s.io/component-helpers v0.0.0 => ./staging/src/k8s.io/component-helpers
+# k8s.io/component-helpers v0.32.0-rc.1 => ./staging/src/k8s.io/component-helpers
 ## explicit; go 1.23.0
-# k8s.io/controller-manager v0.0.0 => ./staging/src/k8s.io/controller-manager
+# k8s.io/controller-manager v0.32.0-rc.1 => ./staging/src/k8s.io/controller-manager
 ## explicit; go 1.23.0
 # k8s.io/cri-api v0.0.0 => ./staging/src/k8s.io/cri-api
 ## explicit; go 1.23.0
@@ -1066,9 +1332,9 @@ k8s.io/klog/v2/ktesting
 k8s.io/klog/v2/ktesting/init
 k8s.io/klog/v2/test
 k8s.io/klog/v2/textlogger
-# k8s.io/kms v0.0.0 => ./staging/src/k8s.io/kms
+# k8s.io/kms v0.32.0 => ./staging/src/k8s.io/kms
 ## explicit; go 1.23.0
-# k8s.io/kube-aggregator v0.0.0 => ./staging/src/k8s.io/kube-aggregator
+# k8s.io/kube-aggregator v0.32.0 => ./staging/src/k8s.io/kube-aggregator
 ## explicit; go 1.23.0
 # k8s.io/kube-controller-manager v0.0.0 => ./staging/src/k8s.io/kube-controller-manager
 ## explicit; go 1.23.0
@@ -1250,3 +1516,8 @@ sigs.k8s.io/structured-merge-diff/v4/value
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
 sigs.k8s.io/yaml/goyaml.v3
+# github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+# github.com/openshift/api => github.com/dusk125/api v0.0.0-20241212053709-6b333900129e
+# github.com/openshift/apiserver-library-go => github.com/dusk125/apiserver-library-go v0.0.0-20241212055705-41777f979e50
+# github.com/openshift/client-go => github.com/dusk125/client-go v0.0.0-20241212054934-9d86edf6d385
+# github.com/openshift/library-go => github.com/dusk125/library-go v0.0.0-20241212055402-9dbaddb63ab9
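
Reviewer note: for anyone unfamiliar with the newly vendored gopkg.in/yaml.v2, the sketch below shows how its public Marshal/Unmarshal API is typically consumed. It is illustrative only and not part of this diff; the config struct and its values are hypothetical.

	package main

	import (
		"fmt"
		"log"

		"gopkg.in/yaml.v2"
	)

	// config is a hypothetical struct used only to illustrate yaml field tags.
	type config struct {
		Name     string   `yaml:"name"`
		Replicas int      `yaml:"replicas"`
		Tags     []string `yaml:"tags,omitempty"`
	}

	func main() {
		in := []byte("name: demo\nreplicas: 3\ntags: [a, b]\n")

		var c config
		// Unmarshal decodes the first YAML document in the input into c.
		if err := yaml.Unmarshal(in, &c); err != nil {
			log.Fatalf("unmarshal: %v", err)
		}
		fmt.Printf("%+v\n", c)

		// Marshal serializes c back to YAML, honoring the field tags above.
		out, err := yaml.Marshal(&c)
		if err != nil {
			log.Fatalf("marshal: %v", err)
		}
		fmt.Print(string(out))
	}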